Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp +52 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/DMAConnectivity.hpp +40 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp +63 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp +11 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp +32 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp +59 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp +718 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp +748 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +448 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +271 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +1232 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp +249 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp +128 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp +58 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp +25 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp +729 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp +165 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h +23 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h +56 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp +587 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h +84 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h +335 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h +416 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h +71 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h +26 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h +37 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h +62 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h +38 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h +33 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h +90 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h +9 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h +13 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h +67 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h +7 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h +161 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h +136 -0
- mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h +108 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/all.py +26 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/bnc.doctest +60 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/childes_fixt.py +13 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/chunk.doctest +372 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/classify.doctest +202 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/conftest.py +33 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/corpus.doctest +0 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/crubadan.doctest +65 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/framenet.doctest +288 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/generate.doctest +78 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/logic.doctest +1096 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/meteor.doctest +54 -0
- openflamingo/lib/python3.10/site-packages/nltk/test/metrics.doctest +321 -0
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <chrono>
|
| 4 |
+
#include <random>
|
| 5 |
+
#include <thread>
|
| 6 |
+
|
| 7 |
+
#include <c10/macros/Macros.h>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
class TORCH_API Backoff {
|
| 12 |
+
public:
|
| 13 |
+
virtual ~Backoff() = default;
|
| 14 |
+
|
| 15 |
+
virtual std::chrono::milliseconds nextBackoff() = 0;
|
| 16 |
+
virtual void reset() = 0;
|
| 17 |
+
|
| 18 |
+
void sleepBackoff() {
|
| 19 |
+
std::this_thread::sleep_for(nextBackoff());
|
| 20 |
+
}
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
class TORCH_API ExponentialBackoffWithJitter : public Backoff {
|
| 24 |
+
public:
|
| 25 |
+
ExponentialBackoffWithJitter();
|
| 26 |
+
|
| 27 |
+
std::chrono::milliseconds nextBackoff() override;
|
| 28 |
+
void reset() override;
|
| 29 |
+
|
| 30 |
+
public:
|
| 31 |
+
std::chrono::milliseconds initialInterval{500};
|
| 32 |
+
double randomizationFactor{0.5};
|
| 33 |
+
double multiplier{1.5};
|
| 34 |
+
std::chrono::milliseconds maxInterval{60000};
|
| 35 |
+
|
| 36 |
+
private:
|
| 37 |
+
std::mt19937 gen_;
|
| 38 |
+
std::chrono::milliseconds currentInterval_{0};
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
class TORCH_API FixedBackoff : public Backoff {
|
| 42 |
+
public:
|
| 43 |
+
FixedBackoff(std::chrono::milliseconds interval);
|
| 44 |
+
|
| 45 |
+
std::chrono::milliseconds nextBackoff() override;
|
| 46 |
+
void reset() override;
|
| 47 |
+
|
| 48 |
+
private:
|
| 49 |
+
std::chrono::milliseconds interval_;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/DMAConnectivity.hpp
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <optional>
|
| 4 |
+
|
| 5 |
+
#include <ATen/ATen.h>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API DMAConnectivity : c10::intrusive_ptr_target {
|
| 10 |
+
c10::DeviceType device_type;
|
| 11 |
+
std::string connection_type;
|
| 12 |
+
|
| 13 |
+
// This is an NxN matrix representing the connectivity between N devices,
|
| 14 |
+
// where each element matrix[i][j] indicates the connectivity between device
|
| 15 |
+
// i and device j. A value of 0 denotes that there is no connection between
|
| 16 |
+
// device i and j. The meaning of non-zero values are specific to the
|
| 17 |
+
// connection type (e.g., for NVLink it represents the number of NVLinks).
|
| 18 |
+
std::vector<std::vector<int>> matrix;
|
| 19 |
+
|
| 20 |
+
explicit DMAConnectivity(
|
| 21 |
+
c10::DeviceType device_type,
|
| 22 |
+
std::string connection_type,
|
| 23 |
+
std::vector<std::vector<int>> matrix);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
struct DMAConnectivityDetector : c10::intrusive_ptr_target {
|
| 27 |
+
virtual c10::intrusive_ptr<DMAConnectivity> detect() = 0;
|
| 28 |
+
virtual ~DMAConnectivityDetector() {}
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
C10_EXPORT void register_dma_connectivity_detector(
|
| 32 |
+
c10::DeviceType device_type,
|
| 33 |
+
const std::string& connection_type,
|
| 34 |
+
c10::intrusive_ptr<DMAConnectivityDetector> detector);
|
| 35 |
+
|
| 36 |
+
TORCH_API c10::intrusive_ptr<DMAConnectivity> detect_dma_connectivity(
|
| 37 |
+
c10::DeviceType device_type,
|
| 38 |
+
const std::string& connection_type);
|
| 39 |
+
|
| 40 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <sys/types.h>
|
| 4 |
+
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <unordered_map>
|
| 7 |
+
|
| 8 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 9 |
+
|
| 10 |
+
namespace c10d {
|
| 11 |
+
|
| 12 |
+
class TORCH_API FileStore : public Store {
|
| 13 |
+
public:
|
| 14 |
+
explicit FileStore(std::string path, int numWorkers);
|
| 15 |
+
|
| 16 |
+
~FileStore() override;
|
| 17 |
+
|
| 18 |
+
void set(const std::string& key, const std::vector<uint8_t>& value) override;
|
| 19 |
+
|
| 20 |
+
std::vector<uint8_t> compareSet(
|
| 21 |
+
const std::string& key,
|
| 22 |
+
const std::vector<uint8_t>& expectedValue,
|
| 23 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 24 |
+
|
| 25 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 26 |
+
|
| 27 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 28 |
+
|
| 29 |
+
int64_t getNumKeys() override;
|
| 30 |
+
|
| 31 |
+
bool deleteKey(const std::string& key) override;
|
| 32 |
+
|
| 33 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 34 |
+
|
| 35 |
+
void wait(const std::vector<std::string>& keys) override;
|
| 36 |
+
|
| 37 |
+
void wait(
|
| 38 |
+
const std::vector<std::string>& keys,
|
| 39 |
+
const std::chrono::milliseconds& timeout) override;
|
| 40 |
+
|
| 41 |
+
// Returns the path used by the FileStore.
|
| 42 |
+
const std::string& getPath() const noexcept {
|
| 43 |
+
return path_;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
protected:
|
| 47 |
+
int64_t addHelper(const std::string& key, int64_t i);
|
| 48 |
+
|
| 49 |
+
std::string path_;
|
| 50 |
+
off_t pos_{0};
|
| 51 |
+
|
| 52 |
+
int numWorkers_;
|
| 53 |
+
const std::string cleanupKey_;
|
| 54 |
+
const std::string refCountKey_;
|
| 55 |
+
const std::string regularPrefix_;
|
| 56 |
+
const std::string deletePrefix_;
|
| 57 |
+
|
| 58 |
+
std::unordered_map<std::string, std::vector<uint8_t>> cache_;
|
| 59 |
+
|
| 60 |
+
std::mutex activeFileOpLock_;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Work.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
|
| 7 |
+
C10_EXPORT void register_work(
|
| 8 |
+
const at::Tensor& tensor,
|
| 9 |
+
const c10::intrusive_ptr<c10d::Work>& work);
|
| 10 |
+
|
| 11 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_GLOO
|
| 4 |
+
|
| 5 |
+
#include <string>
|
| 6 |
+
|
| 7 |
+
#include <c10/util/Registry.h>
|
| 8 |
+
#include <gloo/config.h>
|
| 9 |
+
#include <gloo/transport/device.h>
|
| 10 |
+
|
| 11 |
+
namespace c10d {
|
| 12 |
+
|
| 13 |
+
class TORCH_API GlooDeviceFactory {
|
| 14 |
+
public:
|
| 15 |
+
// Create new device instance for specific interface.
|
| 16 |
+
static std::shared_ptr<::gloo::transport::Device> makeDeviceForInterface(
|
| 17 |
+
const std::string& interface);
|
| 18 |
+
|
| 19 |
+
// Create new device instance for specific hostname or address.
|
| 20 |
+
static std::shared_ptr<::gloo::transport::Device> makeDeviceForHostname(
|
| 21 |
+
const std::string& hostname);
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
TORCH_DECLARE_SHARED_REGISTRY(
|
| 25 |
+
GlooDeviceRegistry,
|
| 26 |
+
::gloo::transport::Device,
|
| 27 |
+
const std::string&, /* interface */
|
| 28 |
+
const std::string& /* hostname */);
|
| 29 |
+
|
| 30 |
+
} // namespace c10d
|
| 31 |
+
|
| 32 |
+
#endif // USE_C10D_GLOO
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <condition_variable>
|
| 4 |
+
#include <mutex>
|
| 5 |
+
#include <unordered_map>
|
| 6 |
+
|
| 7 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
class TORCH_API HashStore : public Store {
|
| 12 |
+
public:
|
| 13 |
+
~HashStore() override = default;
|
| 14 |
+
|
| 15 |
+
void set(const std::string& key, const std::vector<uint8_t>& data) override;
|
| 16 |
+
|
| 17 |
+
std::vector<uint8_t> compareSet(
|
| 18 |
+
const std::string& key,
|
| 19 |
+
const std::vector<uint8_t>& expectedValue,
|
| 20 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 21 |
+
|
| 22 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 23 |
+
|
| 24 |
+
void wait(const std::vector<std::string>& keys) override {
|
| 25 |
+
wait(keys, timeout_);
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
void wait(
|
| 29 |
+
const std::vector<std::string>& keys,
|
| 30 |
+
const std::chrono::milliseconds& timeout) override;
|
| 31 |
+
|
| 32 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 33 |
+
|
| 34 |
+
int64_t getNumKeys() override;
|
| 35 |
+
|
| 36 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 37 |
+
|
| 38 |
+
bool deleteKey(const std::string& key) override;
|
| 39 |
+
|
| 40 |
+
void append(const std::string& key, const std::vector<uint8_t>& value)
|
| 41 |
+
override;
|
| 42 |
+
|
| 43 |
+
std::vector<std::vector<uint8_t>> multiGet(
|
| 44 |
+
const std::vector<std::string>& keys) override;
|
| 45 |
+
|
| 46 |
+
void multiSet(
|
| 47 |
+
const std::vector<std::string>& keys,
|
| 48 |
+
const std::vector<std::vector<uint8_t>>& values) override;
|
| 49 |
+
|
| 50 |
+
// Returns true if this store support append, multiGet and multiSet
|
| 51 |
+
bool hasExtendedApi() const override;
|
| 52 |
+
|
| 53 |
+
protected:
|
| 54 |
+
std::unordered_map<std::string, std::vector<uint8_t>> map_;
|
| 55 |
+
std::mutex m_;
|
| 56 |
+
std::condition_variable cv_;
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp
ADDED
|
@@ -0,0 +1,718 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_NCCL
|
| 4 |
+
|
| 5 |
+
#include <stdio.h>
|
| 6 |
+
#include <stdlib.h>
|
| 7 |
+
|
| 8 |
+
#include <memory>
|
| 9 |
+
#include <mutex>
|
| 10 |
+
#include <thread>
|
| 11 |
+
|
| 12 |
+
#include <ATen/ATen.h>
|
| 13 |
+
#include <ATen/cuda/CUDAEvent.h>
|
| 14 |
+
#include <c10/util/Exception.h>
|
| 15 |
+
#include <nccl.h>
|
| 16 |
+
#include <torch/csrc/distributed/c10d/TraceUtils.h>
|
| 17 |
+
#include <optional>
|
| 18 |
+
|
| 19 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 20 |
+
(NCCL_MINOR >= 14)
|
| 21 |
+
#define NCCL_HAS_COMM_NONBLOCKING
|
| 22 |
+
#endif
|
| 23 |
+
|
| 24 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 25 |
+
(NCCL_MINOR >= 18)
|
| 26 |
+
#define NCCL_HAS_COMM_SPLIT
|
| 27 |
+
#endif
|
| 28 |
+
|
| 29 |
+
// ncclGetLastError() is enabled only for NCCL versions 2.13+
|
| 30 |
+
// ncclRemoteError only exists in NCCL versions 2.13+
|
| 31 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 32 |
+
(NCCL_MINOR >= 13)
|
| 33 |
+
#define ENABLE_NCCL_GET_LAST_ERROR
|
| 34 |
+
#define NCCL_REMOTE_ERROR
|
| 35 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 36 |
+
#define ENABLE_NCCL_GET_LAST_ERROR
|
| 37 |
+
#define NCCL_REMOTE_ERROR
|
| 38 |
+
#endif
|
| 39 |
+
|
| 40 |
+
// Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort()
|
| 41 |
+
// and ncclCommGetAsyncError() are not supported in earlier versions.
|
| 42 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 43 |
+
(NCCL_MINOR >= 4)
|
| 44 |
+
#define ENABLE_NCCL_ERROR_CHECKING
|
| 45 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 46 |
+
#define ENABLE_NCCL_ERROR_CHECKING
|
| 47 |
+
#endif
|
| 48 |
+
|
| 49 |
+
// P2P is enabled only for NCCL versions 2.7+ since ncclSend()
|
| 50 |
+
// and ncclRecv() are not supported in earlier versions.
|
| 51 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 52 |
+
(NCCL_MINOR >= 7)
|
| 53 |
+
#define ENABLE_NCCL_P2P_SUPPORT
|
| 54 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 55 |
+
#define ENABLE_NCCL_P2P_SUPPORT
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 59 |
+
(NCCL_MINOR >= 11)
|
| 60 |
+
#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
|
| 61 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 62 |
+
#define ENABLE_NCCL_PREMUL_SUM_SUPPORT
|
| 63 |
+
#endif
|
| 64 |
+
|
| 65 |
+
#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 66 |
+
(NCCL_MINOR >= 17)
|
| 67 |
+
#define NCCL_HAS_COMM_CTA_CGA
|
| 68 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 69 |
+
#define NCCL_HAS_COMM_CTA_CGA
|
| 70 |
+
#endif
|
| 71 |
+
|
| 72 |
+
#if defined(NCCL_REGISTRATION_SUPPORTED) || \
|
| 73 |
+
((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \
|
| 74 |
+
(NCCL_MINOR >= 19)))
|
| 75 |
+
#define NCCL_HAS_COMM_REGISTER
|
| 76 |
+
#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3)
|
| 77 |
+
#define NCCL_HAS_COMM_REGISTER
|
| 78 |
+
#endif
|
| 79 |
+
|
| 80 |
+
// Macro to throw on a non-successful NCCL return value.
|
| 81 |
+
#define C10D_NCCL_CHECK(cmd, failureReason) \
|
| 82 |
+
do { \
|
| 83 |
+
ncclResult_t result = cmd; \
|
| 84 |
+
if (result != ncclSuccess) { \
|
| 85 |
+
std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
|
| 86 |
+
std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
|
| 87 |
+
"\n" + getNcclErrorDetailStr(result, failureReason); \
|
| 88 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 89 |
+
} \
|
| 90 |
+
} while (0)
|
| 91 |
+
|
| 92 |
+
// Macro to throw on a non-successful NCCL return value for NONBLOCKING calls.
|
| 93 |
+
#define C10D_NCCL_CHECK_NONBLOCKING(cmd, failureReason) \
|
| 94 |
+
do { \
|
| 95 |
+
ncclResult_t result = cmd; \
|
| 96 |
+
if (result != ncclSuccess && result != ncclInProgress) { \
|
| 97 |
+
std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
|
| 98 |
+
std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
|
| 99 |
+
"\n" + getNcclErrorDetailStr(result, failureReason); \
|
| 100 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 101 |
+
} \
|
| 102 |
+
} while (0)
|
| 103 |
+
|
| 104 |
+
// Macro to throw on a non-successful NCCL return value, non-blocking.
|
| 105 |
+
#define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \
|
| 106 |
+
ncclResult_t result = cmd; \
|
| 107 |
+
auto startTimepoint = std::chrono::steady_clock::now(); \
|
| 108 |
+
while (result == ncclInProgress) { \
|
| 109 |
+
if (nccl_nonblocking_timeout() > 0) { \
|
| 110 |
+
auto currentTimepoint = std::chrono::steady_clock::now(); \
|
| 111 |
+
auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
|
| 112 |
+
currentTimepoint - startTimepoint) \
|
| 113 |
+
.count(); \
|
| 114 |
+
if (timeElapsed > nccl_nonblocking_timeout()) { \
|
| 115 |
+
std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \
|
| 116 |
+
std::to_string(__LINE__) + ", " + \
|
| 117 |
+
ncclGetErrorWithVersion(result) + "\n" + \
|
| 118 |
+
getNcclErrorDetailStr(result, failureReason); \
|
| 119 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 120 |
+
} \
|
| 121 |
+
} \
|
| 122 |
+
ncclCommGetAsyncError(comm, &result); \
|
| 123 |
+
} \
|
| 124 |
+
if (result != ncclSuccess) { \
|
| 125 |
+
std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
|
| 126 |
+
std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \
|
| 127 |
+
"\n" + getNcclErrorDetailStr(result, failureReason); \
|
| 128 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
#define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comm, failureReason) \
|
| 132 |
+
ncclResult_t state = cmd; \
|
| 133 |
+
auto startTimepoint = std::chrono::steady_clock::now(); \
|
| 134 |
+
if (state == ncclInProgress) { \
|
| 135 |
+
do { \
|
| 136 |
+
if (nccl_nonblocking_timeout() > 0) { \
|
| 137 |
+
auto currentTimepoint = std::chrono::steady_clock::now(); \
|
| 138 |
+
auto timeElapsed = std::chrono::duration_cast<std::chrono::seconds>( \
|
| 139 |
+
currentTimepoint - startTimepoint) \
|
| 140 |
+
.count(); \
|
| 141 |
+
if (timeElapsed > nccl_nonblocking_timeout()) { \
|
| 142 |
+
std::string err = "NCCL timeout in: " + std::string(__FILE__) + \
|
| 143 |
+
":" + std::to_string(__LINE__) + ", " + \
|
| 144 |
+
ncclGetErrorWithVersion(state) + "\n" + \
|
| 145 |
+
getNcclErrorDetailStr(state, failureReason); \
|
| 146 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 147 |
+
} \
|
| 148 |
+
} \
|
| 149 |
+
ncclCommGetAsyncError(comm->getNcclComm(), &state); \
|
| 150 |
+
} while (state == ncclInProgress); \
|
| 151 |
+
} \
|
| 152 |
+
if (state != ncclSuccess) { \
|
| 153 |
+
std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \
|
| 154 |
+
std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \
|
| 155 |
+
"\n" + getNcclErrorDetailStr(state, failureReason); \
|
| 156 |
+
TORCH_CHECK_WITH(DistBackendError, false, err); \
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
// Macro to print and abort on a non-successful NCCL return value.
|
| 160 |
+
#define C10D_NCCL_ASSERT(cmd) \
|
| 161 |
+
do { \
|
| 162 |
+
ncclResult_t result = cmd; \
|
| 163 |
+
if (result != ncclSuccess) { \
|
| 164 |
+
std::string err = ncclGetErrorWithVersion(result); \
|
| 165 |
+
fprintf( \
|
| 166 |
+
stderr, \
|
| 167 |
+
"NCCL error in: %s:%d, %s\n", \
|
| 168 |
+
__FILE__, \
|
| 169 |
+
__LINE__, \
|
| 170 |
+
err.c_str()); \
|
| 171 |
+
abort(); \
|
| 172 |
+
} \
|
| 173 |
+
} while (0)
|
| 174 |
+
|
| 175 |
+
namespace c10d {
|
| 176 |
+
#define DEFINE_CONSTANT(name, value) \
|
| 177 |
+
static c10::IValue name = value; \
|
| 178 |
+
static std::string name##_str = value;
|
| 179 |
+
// Update whenever changing contents or formatting of the dump
|
| 180 |
+
// (minor when adding fields, major when changing existing fields)
|
| 181 |
+
// Also update both JSON and Pickle dumps to make use of the newly defined
|
| 182 |
+
// field(s).
|
| 183 |
+
DEFINE_CONSTANT(version_val, "2.4");
|
| 184 |
+
DEFINE_CONSTANT(entries_key, "entries");
|
| 185 |
+
DEFINE_CONSTANT(nccl_comm_key, "nccl_comm_state");
|
| 186 |
+
DEFINE_CONSTANT(version_key, "version");
|
| 187 |
+
DEFINE_CONSTANT(pg_config_key, "pg_config");
|
| 188 |
+
DEFINE_CONSTANT(pg_status_key, "pg_status");
|
| 189 |
+
DEFINE_CONSTANT(record_id_key, "record_id");
|
| 190 |
+
DEFINE_CONSTANT(pg_id_key, "pg_id");
|
| 191 |
+
DEFINE_CONSTANT(pg_name_key, "process_group");
|
| 192 |
+
DEFINE_CONSTANT(collective_seq_id_key, "collective_seq_id");
|
| 193 |
+
DEFINE_CONSTANT(p2p_seq_id_key, "p2p_seq_id");
|
| 194 |
+
DEFINE_CONSTANT(is_p2p_key, "is_p2p");
|
| 195 |
+
DEFINE_CONSTANT(op_id_key, "op_id");
|
| 196 |
+
DEFINE_CONSTANT(profiling_name_key, "profiling_name");
|
| 197 |
+
DEFINE_CONSTANT(input_sizes_key, "input_sizes");
|
| 198 |
+
DEFINE_CONSTANT(input_dtypes_key, "input_dtypes");
|
| 199 |
+
DEFINE_CONSTANT(output_sizes_key, "output_sizes");
|
| 200 |
+
DEFINE_CONSTANT(output_dtypes_key, "output_dtypes");
|
| 201 |
+
DEFINE_CONSTANT(time_created_key, "time_created_ns");
|
| 202 |
+
DEFINE_CONSTANT(duration_key, "duration_ms");
|
| 203 |
+
DEFINE_CONSTANT(timeout_key, "timeout_ms");
|
| 204 |
+
DEFINE_CONSTANT(frames_key, "frames");
|
| 205 |
+
DEFINE_CONSTANT(state_key, "state");
|
| 206 |
+
DEFINE_CONSTANT(line_key, "line");
|
| 207 |
+
DEFINE_CONSTANT(name_key, "name");
|
| 208 |
+
DEFINE_CONSTANT(filename_key, "filename");
|
| 209 |
+
DEFINE_CONSTANT(retired_key, "retired");
|
| 210 |
+
DEFINE_CONSTANT(time_discovered_started_key, "time_discovered_started_ns");
|
| 211 |
+
DEFINE_CONSTANT(time_discovered_completed_key, "time_discovered_completed_ns");
|
| 212 |
+
DEFINE_CONSTANT(completed_state, "completed");
|
| 213 |
+
DEFINE_CONSTANT(scheduled_state, "scheduled");
|
| 214 |
+
DEFINE_CONSTANT(started_state, "started");
|
| 215 |
+
#undef DEFINE_CONSTANT
|
| 216 |
+
|
| 217 |
+
TORCH_API size_t hashTensors(const std::vector<at::Tensor>& tensors);
|
| 218 |
+
TORCH_API std::string getNcclVersion();
|
| 219 |
+
TORCH_API std::string ncclGetErrorWithVersion(ncclResult_t error);
|
| 220 |
+
bool nccl_use_nonblocking();
|
| 221 |
+
int nccl_nonblocking_timeout();
|
| 222 |
+
|
| 223 |
+
// Provides additional detail into NCCL error codes based on when these are
|
| 224 |
+
// thrown in the NCCL codebase.
|
| 225 |
+
TORCH_API std::string getNcclErrorDetailStr(
|
| 226 |
+
ncclResult_t error,
|
| 227 |
+
std::optional<std::string> processGroupFailureReason = std::nullopt);
|
| 228 |
+
|
| 229 |
+
// Write NCCL debug info to local disk or any storage users define.
// There are some constraints we set for the debug info writer:
// 1. The writer should only be registered once.
// 2. Once registered, users cannot change it including un-register.
// 3. It is recommended to register the customized writer in the trainer setup,
//    If users don't register before calling launchAsyncDebugDump, then users
//    lose the chance to register (and the default writer will be
//    auto-registered).
class TORCH_API DebugInfoWriter {
 public:
  virtual ~DebugInfoWriter() = default;
  // Persists `ncclTrace` (a serialized flight-recorder dump) to the target
  // described by getWriterTarget(). Override to write to custom storage.
  virtual void write(const std::string& ncclTrace);
  // Returns the process-wide writer for `rank`; per the note above, a
  // default writer is auto-registered if none was registered yet.
  static DebugInfoWriter& getWriter(int rank);
  // Installs a custom writer; see constraints 1-3 above (once, before use).
  static void registerWriter(std::unique_ptr<DebugInfoWriter> writer);
  // Human-readable description of where debug info is written
  // (defaults to the filename built in the constructor).
  virtual std::string getWriterTarget() {
    return filename_;
  }

 protected:
  // Builds the default target name as "<namePrefix><rank>".
  DebugInfoWriter(std::string namePrefix, int rank) {
    filename_ = c10::str(namePrefix, rank);
  }
  std::string filename_;

 private:
  // Process-wide singleton writer plus a flag guarding one-time registration.
  static std::unique_ptr<DebugInfoWriter> writer_;
  static std::atomic<bool> hasWriterRegistered_;
};
|
| 257 |
+
|
| 258 |
+
// RAII wrapper for NCCL communicator
|
| 259 |
+
class NCCLComm {
|
| 260 |
+
public:
|
| 261 |
+
explicit NCCLComm(ncclComm_t ncclComm)
|
| 262 |
+
: ncclComm_(ncclComm),
|
| 263 |
+
aborted_(false),
|
| 264 |
+
ncclAsyncErr_(ncclSuccess),
|
| 265 |
+
commFailureReason_(std::nullopt),
|
| 266 |
+
initialized_(false) {}
|
| 267 |
+
|
| 268 |
+
NCCLComm() : NCCLComm(nullptr) {}
|
| 269 |
+
|
| 270 |
+
~NCCLComm() noexcept {
|
| 271 |
+
// Add lock in this destructor, as aborted_ needs to be read after memory
|
| 272 |
+
// barrier here.
|
| 273 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 274 |
+
if (ncclComm_ && initialized_ && !aborted_) {
|
| 275 |
+
#ifdef ENABLE_NCCL_ERROR_CHECKING
|
| 276 |
+
// Use ncclCommAbort instead of ncclCommDestroy here since
|
| 277 |
+
// ncclCommDestroy could block forever waiting for work to complete on
|
| 278 |
+
// the communicator.
|
| 279 |
+
C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_));
|
| 280 |
+
#else
|
| 281 |
+
C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_));
|
| 282 |
+
#endif
|
| 283 |
+
}
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
static std::shared_ptr<NCCLComm> create(
|
| 287 |
+
int numRanks,
|
| 288 |
+
int rank,
|
| 289 |
+
ncclUniqueId commId) {
|
| 290 |
+
auto comm = std::make_shared<NCCLComm>();
|
| 291 |
+
C10D_NCCL_CHECK(
|
| 292 |
+
ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank),
|
| 293 |
+
std::nullopt);
|
| 294 |
+
comm->ncclId_ = commId;
|
| 295 |
+
comm->rank_ = rank;
|
| 296 |
+
comm->initialized_ = true;
|
| 297 |
+
return comm;
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
#ifdef NCCL_HAS_COMM_NONBLOCKING
|
| 301 |
+
static std::shared_ptr<NCCLComm> create(
|
| 302 |
+
int numRanks,
|
| 303 |
+
int rank,
|
| 304 |
+
ncclUniqueId commId,
|
| 305 |
+
ncclConfig_t& config) {
|
| 306 |
+
auto comm = std::make_shared<NCCLComm>();
|
| 307 |
+
bool isInitialized = false;
|
| 308 |
+
if (nccl_use_nonblocking()) {
|
| 309 |
+
config.blocking = 0;
|
| 310 |
+
LOG(INFO) << "Rank " << rank
|
| 311 |
+
<< ": creating NCCL communicator in nonblocking mode";
|
| 312 |
+
C10D_NCCL_CHECK_NONBLOCKING(
|
| 313 |
+
ncclCommInitRankConfig(
|
| 314 |
+
&(comm->ncclComm_), numRanks, commId, rank, &config),
|
| 315 |
+
std::nullopt);
|
| 316 |
+
} else {
|
| 317 |
+
C10D_NCCL_CHECK(
|
| 318 |
+
ncclCommInitRankConfig(
|
| 319 |
+
&(comm->ncclComm_), numRanks, commId, rank, &config),
|
| 320 |
+
std::nullopt);
|
| 321 |
+
// under blocking mode, comm is initialized after NCCL CHECK
|
| 322 |
+
isInitialized = true;
|
| 323 |
+
}
|
| 324 |
+
comm->ncclId_ = commId;
|
| 325 |
+
comm->rank_ = rank;
|
| 326 |
+
comm->initialized_ = isInitialized;
|
| 327 |
+
return comm;
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
static std::shared_ptr<NCCLComm> split(
|
| 331 |
+
NCCLComm* source,
|
| 332 |
+
int color_id,
|
| 333 |
+
int rank,
|
| 334 |
+
ncclConfig_t& config,
|
| 335 |
+
std::vector<uint64_t>& ranks_ull);
|
| 336 |
+
#endif
|
| 337 |
+
|
| 338 |
+
#if defined(IS_NCCLX) && defined(NCCL_COMM_DUMP)
|
| 339 |
+
std::unordered_map<std::string, std::string> ncclCommDump() {
|
| 340 |
+
std::unordered_map<std::string, std::string> dump;
|
| 341 |
+
if (isAborted()) {
|
| 342 |
+
LOG(INFO) << "Communicator was aborted before trying to dump its state.";
|
| 343 |
+
return dump;
|
| 344 |
+
}
|
| 345 |
+
C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), std::nullopt);
|
| 346 |
+
return dump;
|
| 347 |
+
}
|
| 348 |
+
#endif
|
| 349 |
+
|
| 350 |
+
ncclUniqueId getNcclId() {
|
| 351 |
+
return ncclId_;
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
// Must not be copyable
|
| 355 |
+
NCCLComm(const NCCLComm&) = delete;
|
| 356 |
+
NCCLComm& operator=(const NCCLComm&) = delete;
|
| 357 |
+
|
| 358 |
+
// Do not support move assignment as there is no valid use case
|
| 359 |
+
NCCLComm& operator=(NCCLComm&& other) = delete;
|
| 360 |
+
|
| 361 |
+
// Move constructable
|
| 362 |
+
NCCLComm(NCCLComm&& other) {
|
| 363 |
+
// Using other's lock, as it reads other's states
|
| 364 |
+
// Can not use this.mutex_, as this object is being constructed.
|
| 365 |
+
std::unique_lock<std::mutex> lock(other.mutex_);
|
| 366 |
+
std::swap(ncclComm_, other.ncclComm_);
|
| 367 |
+
std::swap(aborted_, other.aborted_);
|
| 368 |
+
std::swap(ncclAsyncErr_, other.ncclAsyncErr_);
|
| 369 |
+
std::swap(initialized_, other.initialized_);
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
ncclComm_t getNcclComm();
|
| 373 |
+
|
| 374 |
+
std::optional<std::string> getNcclCommFailureReason() const {
|
| 375 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 376 |
+
return commFailureReason_;
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
void ncclCommAbort(
|
| 380 |
+
std::optional<std::string> commFailureReason = std::nullopt) {
|
| 381 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 382 |
+
#ifdef ENABLE_NCCL_ERROR_CHECKING
|
| 383 |
+
if (aborted_ && !initialized_) {
|
| 384 |
+
// Should not abort twice.
|
| 385 |
+
return;
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
#ifdef NCCL_HAS_COMM_REGISTER
|
| 389 |
+
// Deregister all registered segments before aborting.
|
| 390 |
+
for (auto& it : registeredSegmentHandles_) {
|
| 391 |
+
void* handle = it.second;
|
| 392 |
+
C10D_NCCL_CHECK(
|
| 393 |
+
::ncclCommDeregister(ncclComm_, handle),
|
| 394 |
+
c10::str(
|
| 395 |
+
"Failed to deregister segment handle ",
|
| 396 |
+
handle,
|
| 397 |
+
" on ncclComm_ ",
|
| 398 |
+
ncclComm_));
|
| 399 |
+
}
|
| 400 |
+
registeredSegmentHandles_.clear();
|
| 401 |
+
#endif
|
| 402 |
+
|
| 403 |
+
// Set true failure reason if provided by ProcessGroupNCCL (e.g. work
|
| 404 |
+
// timeout)
|
| 405 |
+
commFailureReason_ = commFailureReason;
|
| 406 |
+
LOG(INFO) << "Aborting ncclComm_ " << ncclComm_ << " with reason: "
|
| 407 |
+
<< (commFailureReason ? *commFailureReason
|
| 408 |
+
: "No abort reason provided.");
|
| 409 |
+
#ifndef NCCL_HAS_COMM_NONBLOCKING
|
| 410 |
+
C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_);
|
| 411 |
+
#else
|
| 412 |
+
C10D_NCCL_CHECK_TIMEOUT(
|
| 413 |
+
::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_);
|
| 414 |
+
#endif
|
| 415 |
+
aborted_ = true;
|
| 416 |
+
ncclComm_ = nullptr;
|
| 417 |
+
|
| 418 |
+
// Set an appropriate error so that we avoid using the communicator.
|
| 419 |
+
if (ncclAsyncErr_ == ncclSuccess) {
|
| 420 |
+
ncclAsyncErr_ = ncclSystemError;
|
| 421 |
+
}
|
| 422 |
+
#else
|
| 423 |
+
// This is a NOOP, if error checks are disabled.
|
| 424 |
+
return;
|
| 425 |
+
#endif
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
bool isAborted() const {
|
| 429 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 430 |
+
return aborted_;
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
uint64_t getCommSplitCounter() const {
|
| 434 |
+
return ncclCommSplitCounter_;
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
ncclResult_t checkForNcclError() {
|
| 438 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 439 |
+
#ifdef ENABLE_NCCL_ERROR_CHECKING
|
| 440 |
+
if (ncclAsyncErr_ != ncclSuccess) {
|
| 441 |
+
return ncclAsyncErr_;
|
| 442 |
+
}
|
| 443 |
+
C10D_NCCL_CHECK(
|
| 444 |
+
ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_);
|
| 445 |
+
return ncclAsyncErr_;
|
| 446 |
+
#else
|
| 447 |
+
// Always return success, if error checks are disabled.
|
| 448 |
+
return ncclSuccess;
|
| 449 |
+
#endif
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
ncclResult_t registerSegment(void* ptr, size_t size) {
|
| 453 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 454 |
+
#ifdef NCCL_HAS_COMM_REGISTER
|
| 455 |
+
// We register only segments from cache allocator
|
| 456 |
+
// which are guaranteed to be with disjoint addr ranges. Thus, a ptr always
|
| 457 |
+
// maps to a unique handle and should not be registered before the current
|
| 458 |
+
// ptr is deregistered and freed.
|
| 459 |
+
TORCH_CHECK(
|
| 460 |
+
registeredSegmentHandles_.count(ptr) == 0,
|
| 461 |
+
"Segment with ptr ",
|
| 462 |
+
ptr,
|
| 463 |
+
" has already been registered on ncclComm_ ",
|
| 464 |
+
ncclComm_);
|
| 465 |
+
|
| 466 |
+
void* handle;
|
| 467 |
+
C10D_NCCL_CHECK(
|
| 468 |
+
ncclCommRegister(ncclComm_, ptr, size, &handle),
|
| 469 |
+
c10::str(
|
| 470 |
+
"Failed to register segment with ptr ",
|
| 471 |
+
ptr,
|
| 472 |
+
", size ",
|
| 473 |
+
size,
|
| 474 |
+
" on ncclComm_ ",
|
| 475 |
+
ncclComm_));
|
| 476 |
+
registeredSegmentHandles_[ptr] = handle;
|
| 477 |
+
return ncclSuccess;
|
| 478 |
+
#else
|
| 479 |
+
return ncclInvalidUsage;
|
| 480 |
+
#endif
|
| 481 |
+
}
|
| 482 |
+
|
| 483 |
+
ncclResult_t deregisterSegment(void* ptr) {
|
| 484 |
+
std::unique_lock<std::mutex> lock(mutex_);
|
| 485 |
+
#ifdef NCCL_HAS_COMM_REGISTER
|
| 486 |
+
TORCH_CHECK(
|
| 487 |
+
registeredSegmentHandles_.count(ptr) == 1,
|
| 488 |
+
"Segment with ptr ",
|
| 489 |
+
ptr,
|
| 490 |
+
" is not registered on ncclComm_ ",
|
| 491 |
+
ncclComm_);
|
| 492 |
+
|
| 493 |
+
void* handle = registeredSegmentHandles_[ptr];
|
| 494 |
+
C10D_NCCL_CHECK(
|
| 495 |
+
ncclCommDeregister(ncclComm_, handle),
|
| 496 |
+
c10::str(
|
| 497 |
+
"Failed to deregister segment handle ",
|
| 498 |
+
handle,
|
| 499 |
+
", with ptr ",
|
| 500 |
+
ptr,
|
| 501 |
+
" on ncclComm_ ",
|
| 502 |
+
ncclComm_));
|
| 503 |
+
registeredSegmentHandles_.erase(ptr);
|
| 504 |
+
return ncclSuccess;
|
| 505 |
+
#else
|
| 506 |
+
return ncclInvalidUsage;
|
| 507 |
+
#endif
|
| 508 |
+
}
|
| 509 |
+
|
| 510 |
+
friend class ProcessGroupNCCL;
|
| 511 |
+
|
| 512 |
+
protected:
|
| 513 |
+
// a helper function to wait until the communicator is initialized;
|
| 514 |
+
void waitUntilInitialized(int timeoutSecs);
|
| 515 |
+
ncclComm_t ncclComm_;
|
| 516 |
+
// Unique nccl_id for this communicator.
|
| 517 |
+
ncclUniqueId ncclId_;
|
| 518 |
+
bool aborted_;
|
| 519 |
+
uint64_t ncclCommSplitCounter_{0};
|
| 520 |
+
ncclResult_t ncclAsyncErr_;
|
| 521 |
+
mutable std::mutex mutex_;
|
| 522 |
+
// Rank that this communicator corresponds to.
|
| 523 |
+
int rank_;
|
| 524 |
+
// Optional reason for communicator failure, provided by ProcessGroupNCCL for
|
| 525 |
+
// better error messaging.
|
| 526 |
+
std::optional<std::string> commFailureReason_;
|
| 527 |
+
bool initialized_{false};
|
| 528 |
+
#ifdef NCCL_HAS_COMM_REGISTER
|
| 529 |
+
// Stores handlers for tensors registered by NCCL
|
| 530 |
+
std::unordered_map<void*, void*> registeredSegmentHandles_;
|
| 531 |
+
#endif
|
| 532 |
+
};
|
| 533 |
+
|
| 534 |
+
// Helper that automatically cleans up premul sums.
|
| 535 |
+
struct ncclRedOpRAII {
|
| 536 |
+
ncclRedOpRAII() = default;
|
| 537 |
+
ncclRedOpRAII(ncclRedOp_t op) : op_(op) {}
|
| 538 |
+
ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm)
|
| 539 |
+
: op_(op), comm_(comm), premul_sum_(true) {}
|
| 540 |
+
ncclRedOpRAII(const ncclRedOpRAII&) = delete;
|
| 541 |
+
ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete;
|
| 542 |
+
ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() {
|
| 543 |
+
std::swap(tmp.op_, this->op_);
|
| 544 |
+
std::swap(tmp.comm_, this->comm_);
|
| 545 |
+
std::swap(tmp.premul_sum_, this->premul_sum_);
|
| 546 |
+
}
|
| 547 |
+
#if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT)
|
| 548 |
+
~ncclRedOpRAII() {
|
| 549 |
+
if (premul_sum_) {
|
| 550 |
+
ncclRedOpDestroy(op_, comm_);
|
| 551 |
+
}
|
| 552 |
+
}
|
| 553 |
+
#endif
|
| 554 |
+
operator ncclRedOp_t() const {
|
| 555 |
+
return op_;
|
| 556 |
+
}
|
| 557 |
+
ncclRedOp_t op_;
|
| 558 |
+
ncclComm_t comm_;
|
| 559 |
+
bool premul_sum_ = false;
|
| 560 |
+
};
|
| 561 |
+
|
| 562 |
+
/* Helper used by work::getDuration() and nccl flight recorder */
// Returns the elapsed time between the two recorded CUDA events.
// NOTE(review): unit (presumably milliseconds, per cudaEventElapsedTime
// convention) is not visible from this header — confirm in the .cpp.
float getDurationFromEvent(
    at::cuda::CUDAEvent& ncclStartEvent,
    at::cuda::CUDAEvent& ncclEndEvent);
|
| 566 |
+
|
| 567 |
+
// Circular buffer of recent collective/p2p operations ("flight recorder").
// Enabled by setting TORCH_NCCL_TRACE_BUFFER_SIZE > 0; entries can be dumped
// as IValue dicts or JSON for post-mortem debugging of hangs.
struct NCCLTraceBuffer {
  static NCCLTraceBuffer* get() {
    // intentionally leak on exit
    // because this will hold python state that may get destructed
    static NCCLTraceBuffer* instance = new NCCLTraceBuffer();
    return instance;
  }
  NCCLTraceBuffer() {
    // Buffer capacity; 0 (the default) disables tracing entirely.
    max_entries_ = getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0);
    // Whether to also capture C++ stack traces (more expensive).
    capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false);
    enabled_ = max_entries_ > 0;
  }
  using Event = at::cuda::CUDAEvent;
  // One recorded operation in the circular buffer.
  struct Entry {
    size_t id_; // incremented id in the trace buffer
    // used to figure out where in the circular entries
    // buffer this entry will be located to
    // update state information
    size_t pg_id_;
    std::tuple<std::string, std::string> pg_name_; // <group_name, group_desc>

    // collective_seq_id and p2p_seq_id refer to actual kernel launches (e.g. 1
    // per coalesced group).
    // collective_seq_id only increments for true collective operations (over
    // all ranks in the group). p2p_seq_id only increments over non-collective
    // operations in the group. op_id refers to logical operations (e.g. one per
    // op inside coalesced group)
    size_t collective_seq_id_;
    size_t p2p_seq_id_;
    size_t op_id_;
    std::string profiling_name_;

    // Python (and optionally C++) stack captured at record time.
    std::shared_ptr<torch::CapturedTraceback> traceback_;
    // we borrow pointers to start_ and end_ so we can query the state
    // on reporting. However, once the event is completed, the call
    // to `complete` will clear these.
    Event *start_, *end_;

    // timestamp when the entry was created, likely close to the time the work
    // was 'enqueued'- not necessarily started
    c10::time_t time_created_;

    // configured timeout for this entry
    c10::time_t timeout_ms_;

    // Is this a P2P event?
    bool isP2P_;

    // Kernel duration, filled in by retire_id() when timing is enabled.
    std::optional<float> duration_;

    // timestamp when our CPU threads discovered that the kernel started.
    // will always be _after_ it actually started, and can be very late
    // if the watchdog thread got stuck on CUDA APIs.
    std::optional<c10::time_t> time_discovered_started_;

    // timestamp when our CPU threads discovered that the kernel completed.
    // will always be _after_ it actually completed, and can be the same time
    // as the discovery of the start if the watchdog thread is stuck on CUDA
    // APIs
    std::optional<c10::time_t> time_discovered_completed_;

    // size information for input/output tensors
    c10::SmallVector<int, 4> input_dims_;
    std::vector<c10::ScalarType> input_dtypes_;
    c10::SmallVector<int, 4> output_dims_;
    std::vector<c10::ScalarType> output_dtypes_;
    c10::SmallVector<int64_t, 8> sizes_; // flattened from inputs, outputs
    bool retired_ = false; // is this work entry no longer in the workMetaList_?
    // a retired but not completed event has timed out
  };

  bool enabled_ = false;
  bool capture_cpp_stack_ = false;
  // Guards entries_/next_/id_ and the maps below.
  std::mutex mutex_;
  std::vector<Entry> entries_;
  size_t max_entries_ = 0;
  // Index of the next slot to overwrite once the buffer is full.
  size_t next_ = 0;
  // Monotonically increasing id handed out to new entries.
  size_t id_ = 0;
  std::map<size_t, std::shared_ptr<ProcessGroupStatus>> all_pg_status_ = {};
  std::map<std::tuple<std::string, std::string>, std::vector<uint64_t>>
      pg_name_to_ranks_ = {};

  // Records a new operation; returns its id (used later by retire_id()).
  // NOTE(review): presumably returns nullopt when tracing is disabled —
  // confirm in the .cpp.
  std::optional<size_t> record(
      size_t pg_id,
      const std::tuple<std::string, std::string>& pg_name,
      size_t collective_seq_id,
      size_t p2p_seq_id,
      size_t op_id,
      std::string profiling_name,
      const std::vector<at::Tensor>& inputs,
      const std::vector<at::Tensor>& outputs,
      Event* start,
      Event* end,
      std::chrono::milliseconds timeout_ms,
      std::shared_ptr<ProcessGroupStatus> pg_status,
      bool isP2P);

  // Remembers the global ranks belonging to a process group, for dumps.
  void record_pg_ranks(
      const std::tuple<std::string, std::string>& pg_name,
      std::vector<uint64_t> ranks);

  // Refreshes an entry's started/completed discovery timestamps.
  void update_state(Entry& r);

  // Returns a snapshot copy of all current entries.
  std::vector<Entry> dump_entries();

  /*
  Mark an Event as completed and free its events.
  This is called by the watchdog thread, and is asynchronous from the
  perspective of the main thread.
  compute_duration defaults to true since retire_id is only called in the
  watchdog thread, which is currently a place we call cuda APIs which may hang,
  but care should be taken to avoid computing duration in any function that must
  never hang. (timing must also be enabled for compute_duration - see
  TORCH_NCCL_ENABLE_TIMING).
  */
  void retire_id(std::optional<size_t> id, bool compute_duration = true);

  const c10::List<c10::IValue> getCollectiveTrace(
      bool includeStacktraces,
      bool onlyActive);

  // dump pg_entries
  const c10::Dict<c10::IValue, c10::IValue> getPgConfig();

  const std::map<std::string, std::map<std::string, std::string>>
  getPgConfigJson();

  // dump pg_status
  const c10::Dict<c10::IValue, c10::IValue> getPgStatus();

  const std::map<std::string, std::map<std::string, std::string>>
  getPgStatusJson();

  // JSON counterpart of dump(); stack traces are serialized as text.
  std::string dump_json(
      const std::optional<std::unordered_map<
          std::string,
          std::unordered_map<std::string, std::string>>>& ncclDumpMap,
      bool includeCollectives,
      bool onlyActive);

  // dump all collectives + ncclDumpMap
  std::string dump(
      const std::optional<std::unordered_map<
          std::string,
          std::unordered_map<std::string, std::string>>>& ncclDumpMap,
      bool includeCollectives,
      bool includeStackTraces,
      bool onlyActive);
};
|
| 716 |
+
} // namespace c10d
|
| 717 |
+
|
| 718 |
+
#endif // USE_C10D_NCCL
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp
ADDED
|
@@ -0,0 +1,748 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 4 |
+
#include <memory>
|
| 5 |
+
#include <unordered_map>
|
| 6 |
+
#include <utility>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
#include <ATen/ATen.h>
|
| 10 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 11 |
+
#include <c10/macros/Macros.h>
|
| 12 |
+
|
| 13 |
+
#include <torch/csrc/distributed/c10d/Work.hpp>
|
| 14 |
+
// *************************************************************************
|
| 15 |
+
// PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN
|
| 16 |
+
// versions 1.7 and 1.8.
|
| 17 |
+
// PLEASE DO NOT ADD ANY DEPENDENCIES.
|
| 18 |
+
// SEE RFC: https://github.com/pytorch/pytorch/issues/39662
|
| 19 |
+
// *************************************************************************
|
| 20 |
+
|
| 21 |
+
// Default timeout applied to process-group operations when the caller does
// not specify one: 30 minutes.
constexpr auto kProcessGroupDefaultTimeout =
    std::chrono::milliseconds(30 * 60 * 1000);
|
| 23 |
+
|
| 24 |
+
namespace c10d {
|
| 25 |
+
|
| 26 |
+
// ProcessGroup is a base class that captures collective and point to
|
| 27 |
+
// point communication in a fixed set of processes.
|
| 28 |
+
//
|
| 29 |
+
// The functions specified in the class below describe the API alone;
|
| 30 |
+
// implementations are provided in subclasses.
|
| 31 |
+
//
|
| 32 |
+
// Every function that performs I/O is executed asynchronously by a
|
| 33 |
+
// thread pool owned by the ProcessGroup (by default). They return an
|
| 34 |
+
// object that can be used to wait for completion or error.
|
| 35 |
+
//
|
| 36 |
+
// The ProcessGroup can instantiate subgroups with fewer or an equal
|
| 37 |
+
// number of members. Implementations must take care that multiple
|
| 38 |
+
// process groups can be used in parallel and synchronize accordingly.
|
| 39 |
+
//
|
| 40 |
+
// The ProcessGroup assumes a fixed set of processes. If the set
|
| 41 |
+
// changes, existing instances must be destructed and instantiation
|
| 42 |
+
// and initialization must start from scratch. For members of the
|
| 43 |
+
// process group to find each other (referred to as rendezvous from
|
| 44 |
+
// hereon)
|
| 45 |
+
//
|
| 46 |
+
class TORCH_API ProcessGroup : public torch::CustomClassHolder {
|
| 47 |
+
public:
|
| 48 |
+
// ProcessGroup Options is a base struct that defines the basic options
|
| 49 |
+
// when constructing a ProcessGroup. Each ProcessGroup subclass should
|
| 50 |
+
// extend this struct and define its options if it wants to provide more
|
| 51 |
+
// config options (beyond basic ones defined here) to end user.
|
| 52 |
+
struct TORCH_API Options : torch::CustomClassHolder {
|
| 53 |
+
explicit Options(
|
| 54 |
+
std::string backend,
|
| 55 |
+
std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout)
|
| 56 |
+
: timeout(timeout), backend(std::move(backend)) {}
|
| 57 |
+
~Options() override = default;
|
| 58 |
+
|
| 59 |
+
std::chrono::milliseconds timeout;
|
| 60 |
+
|
| 61 |
+
// backend name
|
| 62 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 63 |
+
const std::string backend;
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
enum BackendType : uint8_t {
|
| 67 |
+
UNDEFINED = 0,
|
| 68 |
+
GLOO = 1,
|
| 69 |
+
NCCL = 2,
|
| 70 |
+
UCC = 3,
|
| 71 |
+
MPI = 4,
|
| 72 |
+
CUSTOM = 5,
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
// Not used, set for backwards compatibility and only used for TypeDef in
|
| 76 |
+
// Ops.cpp
|
| 77 |
+
explicit ProcessGroup(int rank, int size);
|
| 78 |
+
|
| 79 |
+
explicit ProcessGroup(
|
| 80 |
+
const c10::intrusive_ptr<::c10d::Store>& store,
|
| 81 |
+
int rank,
|
| 82 |
+
int size,
|
| 83 |
+
c10::intrusive_ptr<Options> options);
|
| 84 |
+
~ProcessGroup() override;
|
| 85 |
+
|
| 86 |
+
int getRank() const {
|
| 87 |
+
return rank_;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
int getSize() const {
|
| 91 |
+
return size_;
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
// Returns an unique opaque ID of this process group object.
|
| 95 |
+
int64_t getID() const {
|
| 96 |
+
return reinterpret_cast<std::intptr_t>(this);
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
// Returns an unique opaque ID of a backend for the specific backend type
|
| 100 |
+
// that can correlate with this process group's collectives.
|
| 101 |
+
int64_t getBackendID(BackendType backend_type) const {
|
| 102 |
+
return reinterpret_cast<std::intptr_t>(getBackend(backend_type).get());
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
virtual const std::string getBackendName() const {
|
| 106 |
+
return options_->backend;
|
| 107 |
+
};
|
| 108 |
+
|
| 109 |
+
BackendType getBackendType() const {
|
| 110 |
+
return backendType_;
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
virtual void startCoalescing(c10::DeviceType deviceType) {
|
| 114 |
+
// only nccl has implemented startCoalescing so only execute for nccl
|
| 115 |
+
// backends
|
| 116 |
+
auto backend = getBackend(deviceType);
|
| 117 |
+
backend->startCoalescing();
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
virtual c10::intrusive_ptr<Work> endCoalescing(c10::DeviceType deviceType) {
|
| 121 |
+
// only nccl has implemented endCoalescing so only execute for nccl
|
| 122 |
+
// backends
|
| 123 |
+
auto backend = getBackend(deviceType);
|
| 124 |
+
auto work = backend->endCoalescing();
|
| 125 |
+
return work;
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
virtual c10::intrusive_ptr<Work> broadcast(
|
| 129 |
+
std::vector<at::Tensor>& tensors,
|
| 130 |
+
const BroadcastOptions& opts = BroadcastOptions()) {
|
| 131 |
+
static auto op =
|
| 132 |
+
c10::Dispatcher::singleton()
|
| 133 |
+
.findSchemaOrThrow("c10d::broadcast_", "")
|
| 134 |
+
.typed<
|
| 135 |
+
std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
|
| 136 |
+
at::TensorList,
|
| 137 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 138 |
+
int64_t,
|
| 139 |
+
int64_t,
|
| 140 |
+
bool,
|
| 141 |
+
int64_t)>();
|
| 142 |
+
// It's awakward to unbox the opts here and box them again in the custom C++
|
| 143 |
+
// op. But it's also complicated to make opts as a CustomClassHolder. Leave
|
| 144 |
+
// it as it is now.
|
| 145 |
+
return std::get<1>(op.call(
|
| 146 |
+
tensors,
|
| 147 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 148 |
+
opts.rootRank,
|
| 149 |
+
opts.rootTensor,
|
| 150 |
+
opts.asyncOp,
|
| 151 |
+
opts.timeout.count()));
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
virtual c10::intrusive_ptr<Work> allreduce(
|
| 155 |
+
std::vector<at::Tensor>& tensors,
|
| 156 |
+
const AllreduceOptions& opts = AllreduceOptions()) {
|
| 157 |
+
static auto op =
|
| 158 |
+
c10::Dispatcher::singleton()
|
| 159 |
+
.findSchemaOrThrow("c10d::allreduce_", "")
|
| 160 |
+
.typed<
|
| 161 |
+
std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
|
| 162 |
+
at::TensorList,
|
| 163 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 164 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 165 |
+
const std::optional<at::Tensor>& sparse_indices,
|
| 166 |
+
int64_t)>();
|
| 167 |
+
|
| 168 |
+
return std::get<1>(op.call(
|
| 169 |
+
tensors,
|
| 170 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 171 |
+
c10::make_intrusive<ReduceOp>(opts.reduceOp),
|
| 172 |
+
opts.sparseIndices,
|
| 173 |
+
opts.timeout.count()));
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
virtual c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 177 |
+
std::vector<at::Tensor>& tensors,
|
| 178 |
+
const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) {
|
| 179 |
+
static auto op = c10::Dispatcher::singleton()
|
| 180 |
+
.findSchemaOrThrow("c10d::allreduce_coalesced_", "")
|
| 181 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 182 |
+
at::TensorList,
|
| 183 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 184 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 185 |
+
int64_t)>();
|
| 186 |
+
|
| 187 |
+
return op.call(
|
| 188 |
+
tensors,
|
| 189 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 190 |
+
c10::make_intrusive<ReduceOp>(opts.reduceOp),
|
| 191 |
+
opts.timeout.count());
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
virtual c10::intrusive_ptr<Work> reduce(
|
| 195 |
+
std::vector<at::Tensor>& tensors,
|
| 196 |
+
const ReduceOptions& opts = ReduceOptions()) {
|
| 197 |
+
static auto op = c10::Dispatcher::singleton()
|
| 198 |
+
.findSchemaOrThrow("c10d::reduce_", "")
|
| 199 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 200 |
+
at::TensorList,
|
| 201 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 202 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 203 |
+
int64_t,
|
| 204 |
+
int64_t,
|
| 205 |
+
int64_t)>();
|
| 206 |
+
return op.call(
|
| 207 |
+
tensors,
|
| 208 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 209 |
+
c10::make_intrusive<ReduceOp>(opts.reduceOp),
|
| 210 |
+
opts.rootRank,
|
| 211 |
+
opts.rootTensor,
|
| 212 |
+
opts.timeout.count());
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
virtual c10::intrusive_ptr<Work> allgather(
|
| 216 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 217 |
+
std::vector<at::Tensor>& inputTensors,
|
| 218 |
+
const AllgatherOptions& opts = AllgatherOptions()) {
|
| 219 |
+
static auto op = c10::Dispatcher::singleton()
|
| 220 |
+
.findSchemaOrThrow("c10d::allgather_", "")
|
| 221 |
+
.typed<std::tuple<
|
| 222 |
+
std::vector<std::vector<at::Tensor>>,
|
| 223 |
+
c10::intrusive_ptr<Work>>(
|
| 224 |
+
const std::vector<std::vector<at::Tensor>>&,
|
| 225 |
+
at::TensorList,
|
| 226 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 227 |
+
int64_t)>();
|
| 228 |
+
|
| 229 |
+
return std::get<1>(op.call(
|
| 230 |
+
outputTensors,
|
| 231 |
+
inputTensors,
|
| 232 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 233 |
+
opts.timeout.count()));
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
// Gathers a single tensor inputBuffer into a single buffer outputBuffer that
|
| 237 |
+
// is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE.
|
| 238 |
+
// For implementers of ProcessGroup API and advanced users only.
|
| 239 |
+
// Note: this function will be deprecated in near future.
|
| 240 |
+
virtual c10::intrusive_ptr<Work> _allgather_base(
|
| 241 |
+
at::Tensor& outputBuffer,
|
| 242 |
+
at::Tensor& inputBuffer,
|
| 243 |
+
const AllgatherOptions& opts = AllgatherOptions()) {
|
| 244 |
+
static auto op =
|
| 245 |
+
c10::Dispatcher::singleton()
|
| 246 |
+
.findSchemaOrThrow("c10d::_allgather_base_", "")
|
| 247 |
+
.typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
|
| 248 |
+
at::Tensor&,
|
| 249 |
+
at::Tensor&,
|
| 250 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 251 |
+
bool,
|
| 252 |
+
int64_t)>();
|
| 253 |
+
|
| 254 |
+
return std::get<1>(op.call(
|
| 255 |
+
outputBuffer,
|
| 256 |
+
inputBuffer,
|
| 257 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 258 |
+
opts.asyncOp,
|
| 259 |
+
opts.timeout.count()));
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
// This function is deprecated and will be moved out of ProcessGroup to comms:
|
| 263 |
+
// * do not add dependencies on this function,
|
| 264 |
+
// * do not implement it in your ProcessGroup, implement _allgather_base
|
| 265 |
+
// instead.
|
| 266 |
+
virtual c10::intrusive_ptr<Work> allgather_coalesced(
|
| 267 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 268 |
+
std::vector<at::Tensor>& inputTensors,
|
| 269 |
+
const AllgatherOptions& opts = AllgatherOptions()) {
|
| 270 |
+
static auto op =
|
| 271 |
+
c10::Dispatcher::singleton()
|
| 272 |
+
.findSchemaOrThrow("c10d::allgather_coalesced_", "")
|
| 273 |
+
.typed<c10::intrusive_ptr<Work>(
|
| 274 |
+
const std::vector<std::vector<at::Tensor>>&,
|
| 275 |
+
const at::TensorList&,
|
| 276 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
|
| 277 |
+
|
| 278 |
+
return op.call(
|
| 279 |
+
outputTensorLists,
|
| 280 |
+
inputTensors,
|
| 281 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
// This function is a coalesced version of `allgather_into_tensor` (currently
|
| 285 |
+
// still named as `_allgather_base`). Each tensor in the vector corresponds to
|
| 286 |
+
// an input/output of one `allgather_into_tensor` operation.
|
| 287 |
+
virtual c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 288 |
+
std::vector<at::Tensor>& outputTensors,
|
| 289 |
+
std::vector<at::Tensor>& inputTensors,
|
| 290 |
+
const AllgatherOptions& opts = AllgatherOptions()) {
|
| 291 |
+
static auto op =
|
| 292 |
+
c10::Dispatcher::singleton()
|
| 293 |
+
.findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "")
|
| 294 |
+
.typed<c10::intrusive_ptr<Work>(
|
| 295 |
+
const at::TensorList,
|
| 296 |
+
const at::TensorList,
|
| 297 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&)>();
|
| 298 |
+
|
| 299 |
+
return op.call(
|
| 300 |
+
outputTensors,
|
| 301 |
+
inputTensors,
|
| 302 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this));
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
virtual c10::intrusive_ptr<Work> gather(
|
| 306 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 307 |
+
std::vector<at::Tensor>& inputTensors,
|
| 308 |
+
const GatherOptions& opts = GatherOptions()) {
|
| 309 |
+
static auto op = c10::Dispatcher::singleton()
|
| 310 |
+
.findSchemaOrThrow("c10d::gather_", "")
|
| 311 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 312 |
+
const std::vector<std::vector<at::Tensor>>&,
|
| 313 |
+
const at::TensorList&,
|
| 314 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 315 |
+
int64_t,
|
| 316 |
+
int64_t)>();
|
| 317 |
+
return op.call(
|
| 318 |
+
outputTensors,
|
| 319 |
+
inputTensors,
|
| 320 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 321 |
+
opts.rootRank,
|
| 322 |
+
opts.timeout.count());
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
virtual c10::intrusive_ptr<Work> scatter(
|
| 326 |
+
std::vector<at::Tensor>& outputTensors,
|
| 327 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 328 |
+
const ScatterOptions& opts = ScatterOptions()) {
|
| 329 |
+
static auto op =
|
| 330 |
+
c10::Dispatcher::singleton()
|
| 331 |
+
.findSchemaOrThrow("c10d::scatter_", "")
|
| 332 |
+
.typed<
|
| 333 |
+
std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
|
| 334 |
+
const at::TensorList&,
|
| 335 |
+
const std::vector<std::vector<at::Tensor>>&,
|
| 336 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 337 |
+
int64_t,
|
| 338 |
+
bool,
|
| 339 |
+
int64_t)>();
|
| 340 |
+
return std::get<1>(op.call(
|
| 341 |
+
outputTensors,
|
| 342 |
+
inputTensors,
|
| 343 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 344 |
+
opts.rootRank,
|
| 345 |
+
opts.asyncOp,
|
| 346 |
+
opts.timeout.count()));
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
virtual c10::intrusive_ptr<Work> reduce_scatter(
|
| 350 |
+
std::vector<at::Tensor>& outputTensors,
|
| 351 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 352 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) {
|
| 353 |
+
static auto op =
|
| 354 |
+
c10::Dispatcher::singleton()
|
| 355 |
+
.findSchemaOrThrow("c10d::reduce_scatter_", "")
|
| 356 |
+
.typed<
|
| 357 |
+
std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
|
| 358 |
+
const at::TensorList&,
|
| 359 |
+
const std::vector<std::vector<at::Tensor>>&,
|
| 360 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 361 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 362 |
+
int64_t)>();
|
| 363 |
+
return std::get<1>(op.call(
|
| 364 |
+
outputTensors,
|
| 365 |
+
inputTensors,
|
| 366 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 367 |
+
c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
|
| 368 |
+
opts.timeout.count()));
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
virtual c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 372 |
+
at::Tensor& outputBuffer,
|
| 373 |
+
at::Tensor& inputBuffer,
|
| 374 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) {
|
| 375 |
+
static auto op =
|
| 376 |
+
c10::Dispatcher::singleton()
|
| 377 |
+
.findSchemaOrThrow("c10d::_reduce_scatter_base_", "")
|
| 378 |
+
.typed<std::tuple<at::Tensor, c10::intrusive_ptr<Work>>(
|
| 379 |
+
at::Tensor&,
|
| 380 |
+
at::Tensor&,
|
| 381 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 382 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 383 |
+
bool,
|
| 384 |
+
int64_t)>();
|
| 385 |
+
return std::get<1>(op.call(
|
| 386 |
+
outputBuffer,
|
| 387 |
+
inputBuffer,
|
| 388 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 389 |
+
c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
|
| 390 |
+
opts.asyncOp,
|
| 391 |
+
opts.timeout.count()));
|
| 392 |
+
}
|
| 393 |
+
|
| 394 |
+
// This function is a coalesced version of `reduce_scatter_tensor` (currently
|
| 395 |
+
// still named as `_reduce_scatter_base`). Each tensor in the vector
|
| 396 |
+
// corresponds to an input/output of one `reduce_scatter_tensor` operation.
|
| 397 |
+
virtual c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 398 |
+
std::vector<at::Tensor>& outputTensors,
|
| 399 |
+
std::vector<at::Tensor>& inputTensors,
|
| 400 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) {
|
| 401 |
+
static auto op =
|
| 402 |
+
c10::Dispatcher::singleton()
|
| 403 |
+
.findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "")
|
| 404 |
+
.typed<c10::intrusive_ptr<Work>(
|
| 405 |
+
const at::TensorList,
|
| 406 |
+
const at::TensorList,
|
| 407 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 408 |
+
const c10::intrusive_ptr<::c10d::ReduceOp>&,
|
| 409 |
+
int64_t)>();
|
| 410 |
+
|
| 411 |
+
return op.call(
|
| 412 |
+
outputTensors,
|
| 413 |
+
inputTensors,
|
| 414 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 415 |
+
c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp),
|
| 416 |
+
opts.timeout.count());
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
virtual c10::intrusive_ptr<Work> alltoall_base(
|
| 420 |
+
at::Tensor& outputBuffer,
|
| 421 |
+
at::Tensor& inputBuffer,
|
| 422 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 423 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 424 |
+
const AllToAllOptions& opts = AllToAllOptions()) {
|
| 425 |
+
static auto op = c10::Dispatcher::singleton()
|
| 426 |
+
.findSchemaOrThrow("c10d::alltoall_base_", "")
|
| 427 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 428 |
+
at::Tensor&,
|
| 429 |
+
at::Tensor&,
|
| 430 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 431 |
+
std::vector<int64_t>,
|
| 432 |
+
std::vector<int64_t>,
|
| 433 |
+
int64_t)>();
|
| 434 |
+
return op.call(
|
| 435 |
+
outputBuffer,
|
| 436 |
+
inputBuffer,
|
| 437 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 438 |
+
outputSplitSizes,
|
| 439 |
+
inputSplitSizes,
|
| 440 |
+
opts.timeout.count());
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
virtual c10::intrusive_ptr<Work> alltoall(
|
| 444 |
+
std::vector<at::Tensor>& outputTensors,
|
| 445 |
+
std::vector<at::Tensor>& inputTensors,
|
| 446 |
+
const AllToAllOptions& opts = AllToAllOptions()) {
|
| 447 |
+
static auto op =
|
| 448 |
+
c10::Dispatcher::singleton()
|
| 449 |
+
.findSchemaOrThrow("c10d::alltoall_", "")
|
| 450 |
+
.typed<
|
| 451 |
+
std::tuple<std::vector<at::Tensor>, c10::intrusive_ptr<Work>>(
|
| 452 |
+
const at::TensorList&,
|
| 453 |
+
const at::TensorList&,
|
| 454 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 455 |
+
int64_t)>();
|
| 456 |
+
return std::get<1>(op.call(
|
| 457 |
+
outputTensors,
|
| 458 |
+
inputTensors,
|
| 459 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 460 |
+
opts.timeout.count()));
|
| 461 |
+
}
|
| 462 |
+
|
| 463 |
+
virtual void monitoredBarrier(
|
| 464 |
+
const BarrierOptions& opts,
|
| 465 |
+
bool wait_all_ranks = false) {
|
| 466 |
+
static auto op = c10::Dispatcher::singleton()
|
| 467 |
+
.findSchemaOrThrow("c10d::monitored_barrier_", "")
|
| 468 |
+
.typed<void(
|
| 469 |
+
at::Tensor,
|
| 470 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 471 |
+
const std::vector<int64_t>&,
|
| 472 |
+
int64_t,
|
| 473 |
+
bool)>();
|
| 474 |
+
// Default to using cpu implementation, monitored barrier is only for GLOO
|
| 475 |
+
at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU));
|
| 476 |
+
op.call(
|
| 477 |
+
tensor,
|
| 478 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 479 |
+
opts.device_ids,
|
| 480 |
+
opts.timeout.count(),
|
| 481 |
+
wait_all_ranks);
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 485 |
+
// create it and broadcast it to other ranks using the store. Only implemented
|
| 486 |
+
// for GLOO and NCCL backends currently.
|
| 487 |
+
virtual void setSequenceNumberForGroup() {
|
| 488 |
+
auto backendType = getBackendType();
|
| 489 |
+
// TODO: HACK for backend name to get sequence number for that backend.
|
| 490 |
+
if (backendType == ProcessGroup::BackendType::GLOO ||
|
| 491 |
+
backendType == ProcessGroup::BackendType::NCCL ||
|
| 492 |
+
backendType == ProcessGroup::BackendType::UCC) {
|
| 493 |
+
getDefaultBackend()->setSequenceNumberForGroup();
|
| 494 |
+
} else {
|
| 495 |
+
TORCH_CHECK(
|
| 496 |
+
false,
|
| 497 |
+
c10::str(
|
| 498 |
+
"ProcessGroup ",
|
| 499 |
+
getBackendName(),
|
| 500 |
+
" does not yet support sequence numbers."));
|
| 501 |
+
}
|
| 502 |
+
}
|
| 503 |
+
|
| 504 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 505 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 506 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 507 |
+
virtual uint64_t getSequenceNumberForGroup() {
|
| 508 |
+
auto backendType = getBackendType();
|
| 509 |
+
|
| 510 |
+
// TODO: HACK for backend name to get sequence number for that backend.
|
| 511 |
+
if (backendType == ProcessGroup::BackendType::GLOO ||
|
| 512 |
+
backendType == ProcessGroup::BackendType::NCCL ||
|
| 513 |
+
backendType == ProcessGroup::BackendType::UCC) {
|
| 514 |
+
return getDefaultBackend()->getSequenceNumberForGroup();
|
| 515 |
+
} else {
|
| 516 |
+
TORCH_CHECK(
|
| 517 |
+
false,
|
| 518 |
+
c10::str(
|
| 519 |
+
"ProcessGroup ",
|
| 520 |
+
getBackendName(),
|
| 521 |
+
" does not yet support sequence numbers."));
|
| 522 |
+
}
|
| 523 |
+
}
|
| 524 |
+
|
| 525 |
+
virtual c10::intrusive_ptr<Work> send(
|
| 526 |
+
std::vector<at::Tensor>& tensors,
|
| 527 |
+
int dstRank,
|
| 528 |
+
int tag) {
|
| 529 |
+
static auto op = c10::Dispatcher::singleton()
|
| 530 |
+
.findSchemaOrThrow("c10d::send", "")
|
| 531 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 532 |
+
at::TensorList,
|
| 533 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 534 |
+
int64_t,
|
| 535 |
+
int64_t)>();
|
| 536 |
+
return op.call(
|
| 537 |
+
tensors,
|
| 538 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 539 |
+
dstRank,
|
| 540 |
+
tag);
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
virtual c10::intrusive_ptr<Work> recv(
|
| 544 |
+
std::vector<at::Tensor>& tensors,
|
| 545 |
+
int srcRank,
|
| 546 |
+
int tag) {
|
| 547 |
+
static auto op = c10::Dispatcher::singleton()
|
| 548 |
+
.findSchemaOrThrow("c10d::recv_", "")
|
| 549 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 550 |
+
at::TensorList,
|
| 551 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 552 |
+
int64_t,
|
| 553 |
+
int64_t)>();
|
| 554 |
+
return op.call(
|
| 555 |
+
tensors,
|
| 556 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 557 |
+
srcRank,
|
| 558 |
+
tag);
|
| 559 |
+
}
|
| 560 |
+
|
| 561 |
+
virtual c10::intrusive_ptr<Work> recvAnysource(
|
| 562 |
+
std::vector<at::Tensor>& tensors,
|
| 563 |
+
int tag) {
|
| 564 |
+
static auto op = c10::Dispatcher::singleton()
|
| 565 |
+
.findSchemaOrThrow("c10d::recv_any_source_", "")
|
| 566 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 567 |
+
at::TensorList,
|
| 568 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 569 |
+
int64_t)>();
|
| 570 |
+
return op.call(
|
| 571 |
+
tensors,
|
| 572 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 573 |
+
tag);
|
| 574 |
+
}
|
| 575 |
+
|
| 576 |
+
virtual c10::intrusive_ptr<Work> barrier(
|
| 577 |
+
const BarrierOptions& opts = BarrierOptions()) {
|
| 578 |
+
static at::Tensor tensor;
|
| 579 |
+
// TODO: if nccl was specified then use it
|
| 580 |
+
auto device = opts.device;
|
| 581 |
+
if (device.has_value()) {
|
| 582 |
+
// set device tensor from argument
|
| 583 |
+
tensor = at::empty(
|
| 584 |
+
{1}, at::TensorOptions().device(device.value()).dtype(at::kByte));
|
| 585 |
+
} else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) {
|
| 586 |
+
// set cuda tensor
|
| 587 |
+
tensor = at::empty(
|
| 588 |
+
{1},
|
| 589 |
+
at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte));
|
| 590 |
+
} else {
|
| 591 |
+
// Default to using cpu implementation
|
| 592 |
+
tensor = at::empty(
|
| 593 |
+
{1},
|
| 594 |
+
at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte));
|
| 595 |
+
}
|
| 596 |
+
|
| 597 |
+
static auto op = c10::Dispatcher::singleton()
|
| 598 |
+
.findSchemaOrThrow("c10d::barrier", "")
|
| 599 |
+
.typed<c10::intrusive_ptr<::c10d::Work>(
|
| 600 |
+
at::Tensor,
|
| 601 |
+
const c10::intrusive_ptr<::c10d::ProcessGroup>&,
|
| 602 |
+
const std::vector<int64_t>&,
|
| 603 |
+
int64_t)>();
|
| 604 |
+
|
| 605 |
+
return op.call(
|
| 606 |
+
tensor,
|
| 607 |
+
c10::intrusive_ptr<ProcessGroup>::unsafe_reclaim_from_nonowning(this),
|
| 608 |
+
opts.device_ids,
|
| 609 |
+
opts.timeout.count());
|
| 610 |
+
}
|
| 611 |
+
|
| 612 |
+
c10::intrusive_ptr<Options> getOptions() {
|
| 613 |
+
return options_;
|
| 614 |
+
}
|
| 615 |
+
|
| 616 |
+
bool hasBackends() {
|
| 617 |
+
return !deviceTypeToBackendType_.empty();
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
void setBackend(
|
| 621 |
+
c10::DeviceType deviceType,
|
| 622 |
+
BackendType backendType,
|
| 623 |
+
const std::optional<c10::intrusive_ptr<Backend>>& backend) {
|
| 624 |
+
// TODO: should we add these entries after the backend setting succeeds?
|
| 625 |
+
deviceTypeToBackendType_[deviceType] = backendType;
|
| 626 |
+
deviceTypes_.insert(deviceType);
|
| 627 |
+
// if the backendType is already set then reuse it for this device
|
| 628 |
+
if (backendTypeToBackend_.find(backendType) !=
|
| 629 |
+
backendTypeToBackend_.end()) {
|
| 630 |
+
auto existingBackend = backendTypeToBackend_.at(backendType);
|
| 631 |
+
deviceTypeToBackend_[deviceType] = existingBackend;
|
| 632 |
+
TORCH_CHECK(
|
| 633 |
+
existingBackend->getBoundDeviceId() ==
|
| 634 |
+
(*backend)->getBoundDeviceId());
|
| 635 |
+
} else {
|
| 636 |
+
// check if backend has value
|
| 637 |
+
if (backend.has_value()) {
|
| 638 |
+
deviceTypeToBackend_[deviceType] = backend.value();
|
| 639 |
+
backendTypeToBackend_[backendType] = backend.value();
|
| 640 |
+
(*backend)->setBoundDeviceId(bound_device_id_);
|
| 641 |
+
}
|
| 642 |
+
}
|
| 643 |
+
}
|
| 644 |
+
|
| 645 |
+
c10::intrusive_ptr<Backend> getDefaultBackend() const {
|
| 646 |
+
TORCH_CHECK(
|
| 647 |
+
backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(),
|
| 648 |
+
"Could not find the default backend type ",
|
| 649 |
+
backendType_,
|
| 650 |
+
" for Process Group with name ",
|
| 651 |
+
getBackendName(),
|
| 652 |
+
".");
|
| 653 |
+
return backendTypeToBackend_.at(backendType_);
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
c10::intrusive_ptr<Backend> getBackend(c10::DeviceType deviceType);
|
| 657 |
+
|
| 658 |
+
c10::intrusive_ptr<Backend> getBackend(BackendType backendType) const {
|
| 659 |
+
TORCH_CHECK(
|
| 660 |
+
backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(),
|
| 661 |
+
"Could not find backend type ",
|
| 662 |
+
backendType,
|
| 663 |
+
".");
|
| 664 |
+
return backendTypeToBackend_.at(backendType);
|
| 665 |
+
}
|
| 666 |
+
|
| 667 |
+
// Return device types supported by this ProcessGroup.
|
| 668 |
+
// Note: the return type is `Device` rather than `DeviceType` for the purpose
|
| 669 |
+
// of easy comparison at Python level. The `Device` will have default index
|
| 670 |
+
// (-1).
|
| 671 |
+
std::vector<c10::Device> getDeviceTypes() const {
|
| 672 |
+
std::vector<c10::Device> devices;
|
| 673 |
+
devices.reserve(deviceTypes_.size());
|
| 674 |
+
for (auto& dt : deviceTypes_) {
|
| 675 |
+
devices.emplace_back(dt);
|
| 676 |
+
}
|
| 677 |
+
return devices;
|
| 678 |
+
}
|
| 679 |
+
|
| 680 |
+
void registerOnCompletionHook(
|
| 681 |
+
std::function<void(std::shared_ptr<WorkInfo>)>&& hook) {
|
| 682 |
+
getDefaultBackend()->registerOnCompletionHook(std::move(hook));
|
| 683 |
+
}
|
| 684 |
+
|
| 685 |
+
void waitForPendingWorks() {
|
| 686 |
+
getDefaultBackend()->waitForPendingWorks();
|
| 687 |
+
}
|
| 688 |
+
|
| 689 |
+
bool hasHooks() const {
|
| 690 |
+
return getDefaultBackend()->hasHooks();
|
| 691 |
+
}
|
| 692 |
+
|
| 693 |
+
const std::string& getGroupName() const;
|
| 694 |
+
void setGroupName(const std::string& name);
|
| 695 |
+
const std::string& getGroupDesc() const;
|
| 696 |
+
void setGroupDesc(const std::string& name);
|
| 697 |
+
void enableCollectivesTiming();
|
| 698 |
+
|
| 699 |
+
void release_resources() override;
|
| 700 |
+
|
| 701 |
+
// ProcessGroups optionally can be "bound" to a specific device.
|
| 702 |
+
// Currently this is only for nccl and allows for some opt-in
|
| 703 |
+
// optimizations such as automatic use of ncclCommSplit. The device
|
| 704 |
+
// is specified in `init_process_group` and eventually makes it
|
| 705 |
+
// here and then down into the actual backend instances.
|
| 706 |
+
std::optional<at::Device> getBoundDeviceId() const {
|
| 707 |
+
return bound_device_id_;
|
| 708 |
+
}
|
| 709 |
+
|
| 710 |
+
void setBoundDeviceId(std::optional<at::Device> device) {
|
| 711 |
+
if (device) {
|
| 712 |
+
TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index");
|
| 713 |
+
}
|
| 714 |
+
bound_device_id_ = device;
|
| 715 |
+
}
|
| 716 |
+
|
| 717 |
+
protected:
|
| 718 |
+
// Implementations of this interface need to call this to setup
|
| 719 |
+
// appropriate logging etc.
|
| 720 |
+
void init();
|
| 721 |
+
|
| 722 |
+
c10::intrusive_ptr<c10d::Store> store_;
|
| 723 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 724 |
+
const int rank_;
|
| 725 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 726 |
+
const int size_;
|
| 727 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 728 |
+
const c10::intrusive_ptr<Options> options_;
|
| 729 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 730 |
+
const BackendType backendType_;
|
| 731 |
+
std::string pg_desc_;
|
| 732 |
+
|
| 733 |
+
// Debug level setting. It is parsed once when ProcessGroup is constructed and
|
| 734 |
+
// remains the same across use of this process group.
|
| 735 |
+
DebugLevel dist_debug_level_{DebugLevel::Off};
|
| 736 |
+
|
| 737 |
+
// Backend classes for this ProcessGroup
|
| 738 |
+
std::unordered_set<c10::DeviceType> deviceTypes_;
|
| 739 |
+
std::unordered_map<c10::DeviceType, BackendType> deviceTypeToBackendType_;
|
| 740 |
+
std::unordered_map<c10::DeviceType, c10::intrusive_ptr<Backend>>
|
| 741 |
+
deviceTypeToBackend_;
|
| 742 |
+
std::unordered_map<BackendType, c10::intrusive_ptr<Backend>>
|
| 743 |
+
backendTypeToBackend_;
|
| 744 |
+
|
| 745 |
+
std::optional<at::Device> bound_device_id_;
|
| 746 |
+
};
|
| 747 |
+
|
| 748 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_GLOO
|
| 4 |
+
|
| 5 |
+
#include <condition_variable>
|
| 6 |
+
#include <deque>
|
| 7 |
+
#include <mutex>
|
| 8 |
+
#include <thread>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
#include <gloo/algorithm.h>
|
| 12 |
+
#include <gloo/common/error.h>
|
| 13 |
+
#include <gloo/context.h>
|
| 14 |
+
#include <gloo/rendezvous/store.h>
|
| 15 |
+
#include <gloo/transport/device.h>
|
| 16 |
+
|
| 17 |
+
#include <c10/util/hash.h>
|
| 18 |
+
|
| 19 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 20 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 21 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 22 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 23 |
+
|
| 24 |
+
namespace c10d {
|
| 25 |
+
|
| 26 |
+
constexpr const char* GLOO_BACKEND_NAME = "gloo";
|
| 27 |
+
|
| 28 |
+
// ProcessGroupGloo implements Gloo bindings for c10d.
|
| 29 |
+
//
|
| 30 |
+
// All functions on this class are expected to be called in the same
|
| 31 |
+
// order across processes in the group. This is the only way that we
|
| 32 |
+
// can guarantee to match up the same calls across processes. For
|
| 33 |
+
// multi-threaded usage of process groups, you can use consider using
|
| 34 |
+
// multiple process group instances.
|
| 35 |
+
//
|
| 36 |
+
// The Gloo algorithms that this class calls into are cached by their
|
| 37 |
+
// signature (see description of AlgorithmKey above). This cache works
|
| 38 |
+
// as follows: every function call instantiates an AlgorithmKey and
|
| 39 |
+
// looks in the cache for existing entries. If there is one, it is
|
| 40 |
+
// removed from the cache and returned to the caller. If there are
|
| 41 |
+
// none, a new entry is created and returned. If an entry was created
|
| 42 |
+
// before, but is still in use, the call will block and wait until the
|
| 43 |
+
// entry is returned to the cache.
|
| 44 |
+
//
|
| 45 |
+
// In the future, we hope to extend this to allow multiple entries per
|
| 46 |
+
// key, to enable parallelism for a single key. The number of entries
|
| 47 |
+
// per key must always be identical for all processes. This maximum
|
| 48 |
+
// number can be automatically tuned, but only if we let a single
|
| 49 |
+
// process take charge, and have it broadcast the limits.
|
| 50 |
+
//
|
| 51 |
+
class TORCH_API ProcessGroupGloo : public Backend {
|
| 52 |
+
public:
|
| 53 |
+
// AsyncWork is the Gloo specific superclass for asynchronous work items.
|
| 54 |
+
// We can split asynchronous work into 3 phases:
|
| 55 |
+
// 1) Sanity checks and prepare input (e.g. memcpy)
|
| 56 |
+
// 2) Run operation on background thread
|
| 57 |
+
// 3) Synchronize with completion on foreground thread
|
| 58 |
+
//
|
| 59 |
+
// There is state to be shared between these 3 phases and all of this state
|
| 60 |
+
// is captured in the AsyncWork class and its derivatives.
|
| 61 |
+
//
|
| 62 |
+
// Note: while we are porting operations to use new style collectives, there
|
| 63 |
+
// is a split between operations using the existing caching approach and
|
| 64 |
+
// operations using the new AsyncWork base class. Over time we will port
|
| 65 |
+
// all operations and perform needed cleanup.
|
| 66 |
+
//
|
| 67 |
+
// FIXME: This probably should be called WorkGloo since the work is executed
|
| 68 |
+
// in sync mode by a background thread.
|
| 69 |
+
class TORCH_API AsyncWork : public Work {
|
| 70 |
+
public:
|
| 71 |
+
explicit AsyncWork(
|
| 72 |
+
std::vector<std::vector<at::Tensor>> outputTensors,
|
| 73 |
+
OpType opType,
|
| 74 |
+
uint64_t seq,
|
| 75 |
+
const char* profilingTitle = nullptr,
|
| 76 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 77 |
+
std::nullopt);
|
| 78 |
+
|
| 79 |
+
~AsyncWork() override = default;
|
| 80 |
+
|
| 81 |
+
static void execute(const c10::intrusive_ptr<AsyncWork>& work);
|
| 82 |
+
|
| 83 |
+
virtual void run() = 0;
|
| 84 |
+
|
| 85 |
+
std::vector<at::Tensor> result() override;
|
| 86 |
+
|
| 87 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 88 |
+
uint64_t getSequencenumber() const override;
|
| 89 |
+
|
| 90 |
+
protected:
|
| 91 |
+
friend class ProcessGroupGloo;
|
| 92 |
+
|
| 93 |
+
private:
|
| 94 |
+
void finishWorkGloo();
|
| 95 |
+
void finishWorkGlooError(const std::exception_ptr& eptr);
|
| 96 |
+
inline void recordAsyncWorkProfilingInfo(
|
| 97 |
+
const char* profilingTitle,
|
| 98 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors);
|
| 99 |
+
|
| 100 |
+
const std::vector<std::vector<at::Tensor>> outputTensors_;
|
| 101 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 102 |
+
std::function<void()> recordFunctionBeforeCallback_;
|
| 103 |
+
const uint64_t seq_;
|
| 104 |
+
};
|
| 105 |
+
|
| 106 |
+
// Wrap c10d store as Gloo store
|
| 107 |
+
class TORCH_API GlooStore : public ::gloo::rendezvous::Store {
|
| 108 |
+
public:
|
| 109 |
+
GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {}
|
| 110 |
+
|
| 111 |
+
void setUint(const std::string& key, const std::vector<uint8_t>& value) {
|
| 112 |
+
store_->set(key, value);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
void set(const std::string& key, const std::vector<char>& value) override {
|
| 116 |
+
std::vector<uint8_t> tmp(value.begin(), value.end());
|
| 117 |
+
store_->set(key, tmp);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
std::vector<uint8_t> getUint(const std::string& key) {
|
| 121 |
+
auto value = store_->get(key);
|
| 122 |
+
return value;
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
std::vector<char> get(const std::string& key) override {
|
| 126 |
+
auto value = store_->get(key);
|
| 127 |
+
return std::vector<char>(value.begin(), value.end());
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
void wait(const std::vector<std::string>& keys) override {
|
| 131 |
+
store_->wait(keys, ::c10d::Store::kDefaultTimeout);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
void wait(
|
| 135 |
+
const std::vector<std::string>& keys,
|
| 136 |
+
const std::chrono::milliseconds& timeout) override {
|
| 137 |
+
store_->wait(keys, timeout);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
#ifdef GLOO_STORE_HAS_STORE_V2
|
| 141 |
+
bool has_v2_support() override {
|
| 142 |
+
return store_->hasExtendedApi();
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
std::vector<std::vector<char>> multi_get(
|
| 146 |
+
const std::vector<std::string>& keys) override {
|
| 147 |
+
std::vector<std::vector<char>> res;
|
| 148 |
+
for (auto& value : store_->multiGet(keys)) {
|
| 149 |
+
res.emplace_back(value.begin(), value.end());
|
| 150 |
+
}
|
| 151 |
+
return res;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
void multi_set(
|
| 155 |
+
const std::vector<std::string>& keys,
|
| 156 |
+
const std::vector<std::vector<char>>& values) override {
|
| 157 |
+
std::vector<std::vector<uint8_t>> u_values;
|
| 158 |
+
u_values.reserve(values.size());
|
| 159 |
+
for (auto& value : values) {
|
| 160 |
+
u_values.emplace_back(value.begin(), value.end());
|
| 161 |
+
}
|
| 162 |
+
store_->multiSet(keys, u_values);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
void append(const std::string& key, const std::vector<char>& value)
|
| 166 |
+
override {
|
| 167 |
+
std::vector<uint8_t> tmp(value.begin(), value.end());
|
| 168 |
+
return store_->append(key, tmp);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
int64_t add(const std::string& key, int64_t value) override {
|
| 172 |
+
return store_->add(key, value);
|
| 173 |
+
}
|
| 174 |
+
#endif
|
| 175 |
+
|
| 176 |
+
protected:
|
| 177 |
+
c10::intrusive_ptr<::c10d::Store> store_;
|
| 178 |
+
};
|
| 179 |
+
|
| 180 |
+
// For send and recv operations there is no need to pass them to the
|
| 181 |
+
// thread pool as they are entirely completed by the device thread.
|
| 182 |
+
// This work object is used to synchronize completion of the send or
|
| 183 |
+
// recv operation. It keeps a reference to the tensor it is
|
| 184 |
+
// operating on to prevent it from being deallocated while the
|
| 185 |
+
// operation is still in flight.
|
| 186 |
+
class TORCH_API SendWork : public Work {
|
| 187 |
+
public:
|
| 188 |
+
explicit SendWork(
|
| 189 |
+
at::Tensor& tensor,
|
| 190 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
|
| 191 |
+
uint64_t seq);
|
| 192 |
+
|
| 193 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 194 |
+
|
| 195 |
+
void abort() override;
|
| 196 |
+
|
| 197 |
+
uint64_t getSequencenumber() const override;
|
| 198 |
+
|
| 199 |
+
protected:
|
| 200 |
+
at::Tensor tensor_;
|
| 201 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
|
| 202 |
+
const uint64_t seq_;
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
class TORCH_API RecvWork : public Work {
|
| 206 |
+
public:
|
| 207 |
+
explicit RecvWork(
|
| 208 |
+
at::Tensor& tensor,
|
| 209 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
|
| 210 |
+
OpType opType,
|
| 211 |
+
uint64_t seq,
|
| 212 |
+
const char* profilingTitle = nullptr);
|
| 213 |
+
|
| 214 |
+
int sourceRank() const override;
|
| 215 |
+
|
| 216 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 217 |
+
|
| 218 |
+
void abort() override;
|
| 219 |
+
|
| 220 |
+
uint64_t getSequencenumber() const override;
|
| 221 |
+
|
| 222 |
+
protected:
|
| 223 |
+
at::Tensor tensor_;
|
| 224 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
|
| 225 |
+
int srcRank_;
|
| 226 |
+
const uint64_t seq_;
|
| 227 |
+
};
|
| 228 |
+
|
| 229 |
+
struct TORCH_API Options : public Backend::Options {
|
| 230 |
+
explicit Options(
|
| 231 |
+
std::chrono::milliseconds timeout = kBackendDefaultTimeout);
|
| 232 |
+
|
| 233 |
+
// return intrusive_ptr of the object
|
| 234 |
+
static c10::intrusive_ptr<Options> create(
|
| 235 |
+
std::chrono::milliseconds timeout = kBackendDefaultTimeout) {
|
| 236 |
+
return c10::make_intrusive<Options>(timeout);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
std::vector<std::shared_ptr<::gloo::transport::Device>> devices;
|
| 240 |
+
int threads;
|
| 241 |
+
};
|
| 242 |
+
|
| 243 |
+
const std::string getBackendName() const override {
|
| 244 |
+
return std::string(GLOO_BACKEND_NAME);
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
// Helper functions to create a new device object.
|
| 248 |
+
// They are static functions on this class to keep them logically
|
| 249 |
+
// separate from the rest of the code base (e.g. torch/csrc/distributed).
|
| 250 |
+
|
| 251 |
+
// Create new device instance for specific interface.
|
| 252 |
+
static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
|
| 253 |
+
const std::string& interface);
|
| 254 |
+
|
| 255 |
+
// Create new device instance for specific hostname or address.
|
| 256 |
+
static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
|
| 257 |
+
const std::string& hostname);
|
| 258 |
+
|
| 259 |
+
// Create new device instance.
|
| 260 |
+
// It tries to resolve this machine's hostname and bind to that address.
|
| 261 |
+
// If that fails (i.e. the hostname doesn't resolve to an address), it
|
| 262 |
+
// falls back to binding to the loopback address.
|
| 263 |
+
static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();
|
| 264 |
+
|
| 265 |
+
// Create ProcessGroupGloo instance.
|
| 266 |
+
static c10::intrusive_ptr<ProcessGroupGloo> createProcessGroupGloo(
|
| 267 |
+
const c10::intrusive_ptr<Store>& store,
|
| 268 |
+
int rank,
|
| 269 |
+
int size,
|
| 270 |
+
std::chrono::milliseconds timeout);
|
| 271 |
+
|
| 272 |
+
explicit ProcessGroupGloo(
|
| 273 |
+
const c10::intrusive_ptr<Store>& store,
|
| 274 |
+
int rank,
|
| 275 |
+
int size,
|
| 276 |
+
c10::intrusive_ptr<Options> options = Options::create());
|
| 277 |
+
|
| 278 |
+
~ProcessGroupGloo() override;
|
| 279 |
+
|
| 280 |
+
c10::intrusive_ptr<Options> getOptions() {
|
| 281 |
+
return options_;
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 285 |
+
std::vector<at::Tensor>& tensors,
|
| 286 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 287 |
+
|
| 288 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 289 |
+
std::vector<at::Tensor>& tensors,
|
| 290 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 291 |
+
|
| 292 |
+
c10::intrusive_ptr<Work> allreduce_sparse(
|
| 293 |
+
std::vector<at::Tensor>& tensors,
|
| 294 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 295 |
+
|
| 296 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 297 |
+
std::vector<at::Tensor>& tensors,
|
| 298 |
+
const AllreduceCoalescedOptions& opts =
|
| 299 |
+
AllreduceCoalescedOptions()) override;
|
| 300 |
+
|
| 301 |
+
c10::intrusive_ptr<Work> reduce(
|
| 302 |
+
std::vector<at::Tensor>& tensors,
|
| 303 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 304 |
+
|
| 305 |
+
c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 306 |
+
at::Tensor& outputTensor,
|
| 307 |
+
at::Tensor& inputTensor,
|
| 308 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 309 |
+
|
| 310 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 311 |
+
at::Tensor& output_tensor,
|
| 312 |
+
at::Tensor& input_tensor,
|
| 313 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 314 |
+
|
| 315 |
+
c10::intrusive_ptr<Work> allgather(
|
| 316 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 317 |
+
std::vector<at::Tensor>& inputs,
|
| 318 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 319 |
+
|
| 320 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 321 |
+
std::vector<std::vector<at::Tensor>>& output_lists,
|
| 322 |
+
std::vector<at::Tensor>& input_list,
|
| 323 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 324 |
+
|
| 325 |
+
c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 326 |
+
std::vector<at::Tensor>& outputs,
|
| 327 |
+
std::vector<at::Tensor>& inputs,
|
| 328 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 329 |
+
|
| 330 |
+
c10::intrusive_ptr<Work> gather(
|
| 331 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 332 |
+
std::vector<at::Tensor>& inputs,
|
| 333 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 334 |
+
|
| 335 |
+
c10::intrusive_ptr<Work> scatter(
|
| 336 |
+
std::vector<at::Tensor>& outputs,
|
| 337 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 338 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 339 |
+
|
| 340 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 341 |
+
std::vector<at::Tensor>& outputs,
|
| 342 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 343 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 344 |
+
|
| 345 |
+
c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 346 |
+
std::vector<at::Tensor>& outputTensors,
|
| 347 |
+
std::vector<at::Tensor>& inputTensors,
|
| 348 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 349 |
+
|
| 350 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 351 |
+
at::Tensor& outputTensor,
|
| 352 |
+
at::Tensor& inputTensor,
|
| 353 |
+
std::vector<int64_t>& outputCounts,
|
| 354 |
+
std::vector<int64_t>& inputCounts,
|
| 355 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 356 |
+
|
| 357 |
+
c10::intrusive_ptr<Work> send(
|
| 358 |
+
std::vector<at::Tensor>& tensors,
|
| 359 |
+
int dstRank,
|
| 360 |
+
int tag) override;
|
| 361 |
+
|
| 362 |
+
c10::intrusive_ptr<Work> recv(
|
| 363 |
+
std::vector<at::Tensor>& tensors,
|
| 364 |
+
int srcRank,
|
| 365 |
+
int tag) override;
|
| 366 |
+
|
| 367 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 368 |
+
std::vector<at::Tensor>& tensors,
|
| 369 |
+
int tag) override;
|
| 370 |
+
|
| 371 |
+
c10::intrusive_ptr<Work> barrier(
|
| 372 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 373 |
+
|
| 374 |
+
void enableCollectivesTiming() override;
|
| 375 |
+
|
| 376 |
+
const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const {
|
| 377 |
+
return store_;
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
// Similar to barrier(), but blocks rank 0 until all other ranks have
|
| 381 |
+
// acknowledged that they are alive (through send/recv from rank 0). Rank 0
|
| 382 |
+
// is able to report all failed ranks if waitAllRanks = true, otherwise
|
| 383 |
+
// reports the first rank it detected as failed.
|
| 384 |
+
void monitoredBarrier(
|
| 385 |
+
const BarrierOptions& opts = BarrierOptions(),
|
| 386 |
+
bool waitAllRanks = false) override;
|
| 387 |
+
|
| 388 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 389 |
+
// create it and broadcast it to other ranks using the store.
|
| 390 |
+
void setSequenceNumberForGroup() override;
|
| 391 |
+
|
| 392 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 393 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 394 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 395 |
+
uint64_t getSequenceNumberForGroup() override;
|
| 396 |
+
|
| 397 |
+
int getNumThreads() {
|
| 398 |
+
return options_->threads;
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
protected:
|
| 402 |
+
std::unique_ptr<::gloo::rendezvous::Store> store_;
|
| 403 |
+
const c10::intrusive_ptr<Options> options_;
|
| 404 |
+
|
| 405 |
+
// Every Gloo context represents a set of connections to its peers.
|
| 406 |
+
// In order to use more than one device (or allow for parallelism on
|
| 407 |
+
// a single device), you need multiple contexts.
|
| 408 |
+
std::vector<std::shared_ptr<::gloo::Context>> contexts_;
|
| 409 |
+
std::vector<std::thread> threads_;
|
| 410 |
+
bool stop_;
|
| 411 |
+
|
| 412 |
+
// Incremented for every collective we kick off.
|
| 413 |
+
// The value is used as tag for collective operations. Collectives are kicked
|
| 414 |
+
// off in identical order across processes. Therefore the tag can be used
|
| 415 |
+
// to match up operations during concurrent execution.
|
| 416 |
+
uint32_t collectiveCounter_;
|
| 417 |
+
|
| 418 |
+
// Returns next collective tag to use (uses collectiveCounter_).
|
| 419 |
+
uint32_t nextTag();
|
| 420 |
+
|
| 421 |
+
// Returns the context to use for the specified tag.
|
| 422 |
+
// With `nextTag` returning an increasing number, this should lead
|
| 423 |
+
// to contexts being used in a round-robin fashion.
|
| 424 |
+
std::shared_ptr<::gloo::Context> getContext(uint32_t tag);
|
| 425 |
+
|
| 426 |
+
// Entrypoint for worker threads.
|
| 427 |
+
void runLoop(int workerIndex);
|
| 428 |
+
|
| 429 |
+
// Queue work to run on worker thread.
|
| 430 |
+
void enqueue(c10::intrusive_ptr<AsyncWork> work);
|
| 431 |
+
|
| 432 |
+
// Keep both a queue of pending work, and a vector with in progress work.
|
| 433 |
+
// Both of these can only be mutated when holding the queue lock.
|
| 434 |
+
// We keep both around instead of just the queue, so we can grab a weak_ptr
|
| 435 |
+
// to all in progress and pending work when executing a barrier.
|
| 436 |
+
// When executing a barrier, we need to ensure that all prior work
|
| 437 |
+
// has completed before completing itself.
|
| 438 |
+
std::deque<c10::intrusive_ptr<AsyncWork>> workQueue_;
|
| 439 |
+
std::vector<c10::intrusive_ptr<AsyncWork>> workInProgress_;
|
| 440 |
+
std::mutex workMutex_;
|
| 441 |
+
std::condition_variable workProduceCV_;
|
| 442 |
+
std::condition_variable workConsumeCV_;
|
| 443 |
+
uint64_t seq_{0};
|
| 444 |
+
};
|
| 445 |
+
|
| 446 |
+
} // namespace c10d
|
| 447 |
+
|
| 448 |
+
#endif // USE_C10D_GLOO
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_MPI
|
| 4 |
+
|
| 5 |
+
#include <condition_variable>
|
| 6 |
+
#include <deque>
|
| 7 |
+
#include <exception>
|
| 8 |
+
#include <memory>
|
| 9 |
+
#include <mutex>
|
| 10 |
+
#include <thread>
|
| 11 |
+
#include <vector>
|
| 12 |
+
|
| 13 |
+
#include <ATen/core/ivalue.h>
|
| 14 |
+
#include <ATen/core/ivalue_inl.h>
|
| 15 |
+
|
| 16 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 17 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 18 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 19 |
+
|
| 20 |
+
#include <c10/util/CallOnce.h>
|
| 21 |
+
|
| 22 |
+
#include <mpi.h>
|
| 23 |
+
|
| 24 |
+
namespace c10d {
|
| 25 |
+
|
| 26 |
+
constexpr const char* MPI_BACKEND_NAME = "mpi";
|
| 27 |
+
|
| 28 |
+
// WorkEntry is the state associated with a single MPI run instance.
|
| 29 |
+
// It include the source Tensor list and destination Tensor list, as well as
|
| 30 |
+
// The actual run function that will operate either on src or dst or both.
|
| 31 |
+
struct WorkEntry {
|
| 32 |
+
explicit WorkEntry(
|
| 33 |
+
std::vector<at::Tensor>* srcPtr,
|
| 34 |
+
std::vector<at::Tensor>* dstPtr,
|
| 35 |
+
std::function<void(std::unique_ptr<WorkEntry>&)> run)
|
| 36 |
+
: dst(dstPtr ? *dstPtr : std::vector<at::Tensor>()), run(std::move(run)) {
|
| 37 |
+
if (srcPtr) {
|
| 38 |
+
src = *srcPtr;
|
| 39 |
+
}
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// Not copyable
|
| 43 |
+
WorkEntry(const WorkEntry&) = delete;
|
| 44 |
+
// Not copy assignable
|
| 45 |
+
WorkEntry& operator=(const WorkEntry&) = delete;
|
| 46 |
+
|
| 47 |
+
// For input and output tensors (in-place), we will always use src
|
| 48 |
+
std::vector<at::Tensor> src;
|
| 49 |
+
|
| 50 |
+
// Copy of user provided outputs.
|
| 51 |
+
const std::vector<at::Tensor> dst;
|
| 52 |
+
|
| 53 |
+
// src rank returned, for recv only
|
| 54 |
+
int* srcRank = nullptr;
|
| 55 |
+
std::function<void(std::unique_ptr<WorkEntry>&)> run;
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
// ProcessGroupMPI implements MPI bindings for c10d.
|
| 59 |
+
//
|
| 60 |
+
// All functions on this class are expected to be called in the same
|
| 61 |
+
// order across processes in the group. This is the only way that we
|
| 62 |
+
// can guarantee to match up the same calls across processes.
|
| 63 |
+
//
|
| 64 |
+
// All MPI functions provided by this class is asynchronously scheduled on a
|
| 65 |
+
// Worker thread. Therefore, ProcessGroupMPI requires the MPI implementation
|
| 66 |
+
// that is used to have a minimum thread support value of MPI_THREAD_SERIALIZED.
|
| 67 |
+
// That is, The process may be multi-threaded, and multiple threads may make
|
| 68 |
+
// MPI calls, but only one at a time: MPI calls are not made concurrently from
|
| 69 |
+
// two distinct threads (all MPI calls are serialized). However, with
|
| 70 |
+
// MPI_THREAD_SERIALIZED, ProcessGroupMPI will only support a singe process
|
| 71 |
+
// group. In other words, no more than 1 process group can be created globally.
|
| 72 |
+
//
|
| 73 |
+
// If you would like to use multiple ProcessGroupMPI, it requires your MPI
|
| 74 |
+
// implementation to have a thread support value of MPI_THREAD_MULTIPLE, that
|
| 75 |
+
// is, multiple threads may call MPI, with no restriction.
|
| 76 |
+
//
|
| 77 |
+
// Also note that ProcessGroupMPI only supports a single Tensor operation. In
|
| 78 |
+
// other words, the size of the input Tensor vector should always be 1.
|
| 79 |
+
//
|
| 80 |
+
// CUDA tensor can be supported if the MPI used is CUDA-aware MPI, and
|
| 81 |
+
// ProcessGroupMPI will automatically detect this support.
|
| 82 |
+
class TORCH_API ProcessGroupMPI : public Backend {
|
| 83 |
+
public:
|
| 84 |
+
class WorkMPI : public Work {
|
| 85 |
+
public:
|
| 86 |
+
explicit WorkMPI(
|
| 87 |
+
std::vector<at::Tensor> outputTensors,
|
| 88 |
+
const char* profilingTitle = nullptr,
|
| 89 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 90 |
+
std::nullopt)
|
| 91 |
+
: Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors),
|
| 92 |
+
outputTensors_(std::move(outputTensors)),
|
| 93 |
+
future_(c10::make_intrusive<at::ivalue::Future>(
|
| 94 |
+
c10::ListType::create(c10::TensorType::get()))) {}
|
| 95 |
+
|
| 96 |
+
std::vector<at::Tensor> result() override;
|
| 97 |
+
|
| 98 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 99 |
+
|
| 100 |
+
protected:
|
| 101 |
+
friend class ProcessGroupMPI;
|
| 102 |
+
|
| 103 |
+
private:
|
| 104 |
+
void finishWorkMPI();
|
| 105 |
+
void finishWorkMPIError(const std::exception_ptr& eptr);
|
| 106 |
+
|
| 107 |
+
std::vector<at::Tensor> outputTensors_;
|
| 108 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 109 |
+
};
|
| 110 |
+
|
| 111 |
+
class AsyncWork : public Work {
|
| 112 |
+
public:
|
| 113 |
+
AsyncWork(
|
| 114 |
+
MPI_Request request,
|
| 115 |
+
std::vector<at::Tensor> outputTensors,
|
| 116 |
+
const char* profilingTitle = nullptr,
|
| 117 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 118 |
+
std::nullopt);
|
| 119 |
+
|
| 120 |
+
~AsyncWork() override;
|
| 121 |
+
|
| 122 |
+
bool isCompleted() override;
|
| 123 |
+
|
| 124 |
+
bool isSuccess() const override;
|
| 125 |
+
|
| 126 |
+
int sourceRank() const override;
|
| 127 |
+
|
| 128 |
+
bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override;
|
| 129 |
+
|
| 130 |
+
void abort() override;
|
| 131 |
+
|
| 132 |
+
std::vector<at::Tensor> result() override;
|
| 133 |
+
|
| 134 |
+
protected:
|
| 135 |
+
void populateException();
|
| 136 |
+
|
| 137 |
+
private:
|
| 138 |
+
const std::vector<at::Tensor> outputTensors_;
|
| 139 |
+
MPI_Request request_;
|
| 140 |
+
MPI_Status status_{};
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
// Constructor will spawn up the worker thread loop
|
| 144 |
+
explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm);
|
| 145 |
+
|
| 146 |
+
~ProcessGroupMPI() override;
|
| 147 |
+
|
| 148 |
+
// Abort the MPI program, needs to be called when exception is detected
|
| 149 |
+
void abort();
|
| 150 |
+
|
| 151 |
+
const std::string getBackendName() const override {
|
| 152 |
+
return std::string(MPI_BACKEND_NAME);
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 156 |
+
std::vector<at::Tensor>& data,
|
| 157 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 158 |
+
|
| 159 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 160 |
+
std::vector<at::Tensor>& tensors,
|
| 161 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 162 |
+
|
| 163 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 164 |
+
std::vector<at::Tensor>& tensors,
|
| 165 |
+
const AllreduceCoalescedOptions& opts =
|
| 166 |
+
AllreduceCoalescedOptions()) override;
|
| 167 |
+
|
| 168 |
+
c10::intrusive_ptr<Work> reduce(
|
| 169 |
+
std::vector<at::Tensor>& tensors,
|
| 170 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 171 |
+
|
| 172 |
+
c10::intrusive_ptr<Work> allgather(
|
| 173 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 174 |
+
std::vector<at::Tensor>& inputTensors,
|
| 175 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 176 |
+
|
| 177 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 178 |
+
at::Tensor& outputbuffer,
|
| 179 |
+
at::Tensor& inputbuffer,
|
| 180 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 181 |
+
|
| 182 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 183 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 184 |
+
std::vector<at::Tensor>& inputTensors,
|
| 185 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 186 |
+
|
| 187 |
+
c10::intrusive_ptr<Work> gather(
|
| 188 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 189 |
+
std::vector<at::Tensor>& inputTensors,
|
| 190 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 191 |
+
|
| 192 |
+
c10::intrusive_ptr<Work> scatter(
|
| 193 |
+
std::vector<at::Tensor>& outputTensors,
|
| 194 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 195 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 196 |
+
|
| 197 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 198 |
+
std::vector<at::Tensor>& outputTensors,
|
| 199 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 200 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 201 |
+
|
| 202 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 203 |
+
at::Tensor& outputTensor,
|
| 204 |
+
at::Tensor& inputTensor,
|
| 205 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 206 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 207 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 208 |
+
|
| 209 |
+
c10::intrusive_ptr<Work> alltoall(
|
| 210 |
+
std::vector<at::Tensor>& outputTensors,
|
| 211 |
+
std::vector<at::Tensor>& inputTensors,
|
| 212 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 213 |
+
|
| 214 |
+
c10::intrusive_ptr<Work> send(
|
| 215 |
+
std::vector<at::Tensor>& tensors,
|
| 216 |
+
int dstRank,
|
| 217 |
+
int tag) override;
|
| 218 |
+
|
| 219 |
+
c10::intrusive_ptr<Work> recv(
|
| 220 |
+
std::vector<at::Tensor>& tensors,
|
| 221 |
+
int srcRank,
|
| 222 |
+
int tag) override;
|
| 223 |
+
|
| 224 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 225 |
+
std::vector<at::Tensor>& tensor,
|
| 226 |
+
int tag) override;
|
| 227 |
+
|
| 228 |
+
c10::intrusive_ptr<Work> barrier(
|
| 229 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 230 |
+
|
| 231 |
+
// Creating a new ProcessGroupMPI, will initialize MPI if not initialized
|
| 232 |
+
static c10::intrusive_ptr<ProcessGroupMPI> createProcessGroupMPI(
|
| 233 |
+
std::vector<int> ranks = {});
|
| 234 |
+
|
| 235 |
+
protected:
|
| 236 |
+
using WorkType =
|
| 237 |
+
std::tuple<std::unique_ptr<WorkEntry>, c10::intrusive_ptr<WorkMPI>>;
|
| 238 |
+
// Worker thread loop
|
| 239 |
+
void runLoop();
|
| 240 |
+
// Helper function that is called by the destructor
|
| 241 |
+
void destroy();
|
| 242 |
+
|
| 243 |
+
c10::intrusive_ptr<Work> enqueue(
|
| 244 |
+
std::unique_ptr<WorkEntry> entry,
|
| 245 |
+
const char* profilingTitle = nullptr,
|
| 246 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 247 |
+
std::nullopt);
|
| 248 |
+
|
| 249 |
+
bool stop_;
|
| 250 |
+
|
| 251 |
+
std::mutex pgMutex_;
|
| 252 |
+
std::thread workerThread_;
|
| 253 |
+
|
| 254 |
+
std::deque<WorkType> queue_;
|
| 255 |
+
std::condition_variable queueProduceCV_;
|
| 256 |
+
std::condition_variable queueConsumeCV_;
|
| 257 |
+
|
| 258 |
+
// Global states
|
| 259 |
+
static void initMPIOnce();
|
| 260 |
+
static void mpiExit();
|
| 261 |
+
static c10::once_flag onceFlagInitMPI;
|
| 262 |
+
|
| 263 |
+
static std::mutex pgGlobalMutex_;
|
| 264 |
+
static int mpiThreadSupport_;
|
| 265 |
+
|
| 266 |
+
MPI_Comm pgComm_;
|
| 267 |
+
};
|
| 268 |
+
|
| 269 |
+
} // namespace c10d
|
| 270 |
+
|
| 271 |
+
#endif // USE_C10D_MPI
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp
ADDED
|
@@ -0,0 +1,1232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_NCCL
|
| 4 |
+
|
| 5 |
+
#if defined(__linux__)
|
| 6 |
+
#include <fcntl.h>
|
| 7 |
+
#include <sys/stat.h>
|
| 8 |
+
#include <sys/types.h>
|
| 9 |
+
#include <unistd.h>
|
| 10 |
+
#endif
|
| 11 |
+
|
| 12 |
+
#include <atomic>
|
| 13 |
+
#include <chrono>
|
| 14 |
+
#include <future>
|
| 15 |
+
#include <iostream>
|
| 16 |
+
#include <list>
|
| 17 |
+
#include <mutex>
|
| 18 |
+
#include <thread>
|
| 19 |
+
#include <unordered_map>
|
| 20 |
+
|
| 21 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 22 |
+
#include <torch/csrc/distributed/c10d/NCCLUtils.hpp>
|
| 23 |
+
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
|
| 24 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 25 |
+
#include <torch/csrc/distributed/c10d/intra_node_comm.hpp>
|
| 26 |
+
|
| 27 |
+
#include <ATen/DynamicLibrary.h>
|
| 28 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 29 |
+
#include <ATen/cuda/CUDAEvent.h>
|
| 30 |
+
#include <c10/core/Stream.h>
|
| 31 |
+
#include <c10/core/StreamGuard.h>
|
| 32 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 33 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 34 |
+
#include <c10/cuda/CUDAStream.h>
|
| 35 |
+
|
| 36 |
+
#include <torch/custom_class.h>
|
| 37 |
+
|
| 38 |
+
namespace c10d {
|
| 39 |
+
|
| 40 |
+
// Environment variables recognized by ProcessGroupNCCL. Each entry lists the
// preferred variable name first; additional entries are deprecated aliases
// kept for backward compatibility and consulted in order.

// Control broadcasting of NCCL uniqueId
static std::vector<std::string> TORCH_NCCL_BCAST_UNIQUEID = {
    "TORCH_NCCL_BCAST_UNIQUEID"};

// Control whether to always use high priority streams
static std::vector<std::string> TORCH_NCCL_HIGH_PRIORITY = {
    "TORCH_NCCL_HIGH_PRIORITY"};

// Control whether or not wait() is blocking or non-blocking.
static std::vector<std::string> TORCH_NCCL_BLOCKING_WAIT = {
    "TORCH_NCCL_BLOCKING_WAIT",
    "NCCL_BLOCKING_WAIT"};

// Control whether or not we perform Async Error Handling with NCCL.
// TODO: We want to eventually remove this variable and make users use the
// default value (3 - SkipCleanUp).
static std::vector<std::string> TORCH_NCCL_ASYNC_ERROR_HANDLING = {
    "TORCH_NCCL_ASYNC_ERROR_HANDLING",
    "NCCL_ASYNC_ERROR_HANDLING"};

// Control whether dumping debug info on watchdog timeout is enabled. This
// variable must be set together with TORCH_NCCL_ENABLE_MONITORING=1 and
// TORCH_NCCL_TRACE_BUFFER_SIZE > 0.
static std::vector<std::string> TORCH_NCCL_DUMP_ON_TIMEOUT = {
    "TORCH_NCCL_DUMP_ON_TIMEOUT"};

// Control whether Desync Debug is enabled. This variable must be set together
// with TORCH_NCCL_ASYNC_ERROR_HANDLING.
static std::vector<std::string> TORCH_NCCL_DESYNC_DEBUG = {
    "TORCH_NCCL_DESYNC_DEBUG",
    "NCCL_DESYNC_DEBUG"};

// Enable recording start-events for all ProcessGroupNCCL collectives, and
// compute accurate collective timing per-collective. (Note: end-events are
// recorded by default. Turning this flag on can increase the chance of a
// watchdog hang due to performing a CUDA event query, which eventually calls
// the cudaEventElapsedTime() API.)
static std::vector<std::string> TORCH_NCCL_ENABLE_TIMING = {
    "TORCH_NCCL_ENABLE_TIMING",
    "NCCL_ENABLE_TIMING"};

// Enable the monitoring thread, which aborts the process when the
// ProcessGroupNCCL watchdog thread gets stuck and no heartbeat is detected
// after TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC. This can happen due to calling
// CUDA/NCCL APIs that may hang. It is useful to prevent jobs from tying up
// cluster resources longer than necessary.
static std::vector<std::string> TORCH_NCCL_ENABLE_MONITORING = {
    "TORCH_NCCL_ENABLE_MONITORING"};

// Control the watchdog heartbeat timeout period after which the monitoring
// thread will abort the process.
static std::vector<std::string> TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = {
    "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"};

// Whether to rethrow CUDA errors in the watchdog (default true)
static std::vector<std::string> TORCH_NCCL_RETHROW_CUDA_ERRORS = {
    "TORCH_NCCL_RETHROW_CUDA_ERRORS"};

// The maximum number of events stored in the flight recorder's ring buffer.
// (One event could be the start or end of a collective, for example.)
static std::vector<std::string> TORCH_NCCL_TRACE_BUFFER_SIZE = {
    "TORCH_NCCL_TRACE_BUFFER_SIZE"};

// Control how much extra time we will wait for dumping the debugging info
// before we exit and throw a timeout exception.
static std::vector<std::string> TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC = {
    "TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC"};

// Control the interval inside the monitoring thread to check the coordinated
// signal from other ranks, e.g. to dump the debugging information.
static std::vector<std::string> TORCH_NCCL_COORD_CHECK_MILSEC = {
    "TORCH_NCCL_COORD_CHECK_MILSEC"};

// Whether to log C++ stack traces on unclean shutdown (default true)
static std::vector<std::string> TORCH_NCCL_LOG_CPP_STACK_ON_UNCLEAN_SHUTDOWN = {
    "TORCH_NCCL_LOG_CPP_STACK_ON_UNCLEAN_SHUTDOWN"};

// Control whether to use CudaEventCache for the collective in the watchdog
// thread. We noticed in the past that destroying a CudaEvent while the cuda
// global lock is held can cause a hang.
static std::vector<std::string> TORCH_NCCL_CUDA_EVENT_CACHE = {
    "TORCH_NCCL_CUDA_EVENT_CACHE"};

// Whether to check collective inputs for NaNs.
static std::vector<std::string> TORCH_NCCL_NAN_CHECK = {"TORCH_NCCL_NAN_CHECK"};

// Backend name reported by getBackendName().
constexpr const char* NCCL_BACKEND_NAME = "nccl";

// Store key used to coordinate an exception-triggered debug dump.
constexpr const char* EXCEPTION_DUMP = "exception_dump";

// Period between work-status updates.
constexpr const int kWorkStatusUpdatePeriodMs = 30 * 1000; // 30 seconds

// Default collective timeout: 10 minutes.
constexpr auto kProcessGroupNCCLDefaultTimeout =
    std::chrono::milliseconds(10 * 60 * 1000);
|
| 133 |
+
|
| 134 |
+
// Policy for handling asynchronous NCCL errors:
//   NoHandling:  do not handle asynchronous NCCL errors
//   TearDown:    tear down process upon error, see `WorkNCCL::handleException`
//   CleanUpOnly: just clean up collectives and abort communicators without
//                tearing down the process
//   SkipCleanUp: (this is a temporary option and can be removed in future)
//                tear down process without cleaning up NCCL communicators.
//                This should be used as a last resort in case `ncclCommAbort`
//                itself is hanging.
enum ErrorHandlingMode {
  NoHandling = 0,
  TearDown = 1,
  CleanUpOnly = 2,
  SkipCleanUp = 3
};

// True iff the mode requires cleaning up collectives/communicators.
// NOTE: macro arguments are parenthesized so that expression arguments
// (e.g. `cond ? a : b`) expand correctly.
#define SHOULD_CLEAN_UP(a) ((a) != NoHandling && (a) != SkipCleanUp)

// True iff the mode requires tearing down the process.
#define SHOULD_TEAR_DOWN(a) ((a) != NoHandling && (a) != CleanUpOnly)

// Log a hash signature of a collective's data for debug purposes. `phase`
// identifies input/output, `opType` the collective, `numel` the element
// count, and `hashValue` the computed hash.
#define PRINT_COLLECTIVE_HASH_SIGNATURE(phase, opType, numel, hashValue)   \
  LOG(WARNING) << logPrefix() << "Hash of " << (phase) << " to NCCL "      \
               << (opType) << " with size " << (numel) << " is "           \
               << (hashValue);
|
| 155 |
+
|
| 156 |
+
// If set, ProcessGroupNCCL does not rely on recordStream calls to ensure
// caching-allocator safety for tensors used on both user-facing and internal
// comm streams. Instead it stashes live references to those tensors until
// after the user-facing streams are synced with the comm streams.
// See stashed_for_allocator_safety_ below.
static std::vector<std::string> TORCH_NCCL_AVOID_RECORD_STREAMS = {
    "TORCH_NCCL_AVOID_RECORD_STREAMS"};

// If set, ProcessGroupNCCL registers postAlloc and preFree hooks with the
// cuda cache allocator so that whenever a tensor is allocated or freed,
// ProcessGroupNCCL can register/deregister the tensor on all available NCCL
// communicators.
static std::vector<std::string> TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK =
    {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK",
     "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"};
|
| 171 |
+
|
| 172 |
+
#if defined(__linux__)
|
| 173 |
+
struct DumpPipe {
|
| 174 |
+
DumpPipe(int rank) {
|
| 175 |
+
std::string fileStem =
|
| 176 |
+
getCvarString({"TORCH_NCCL_DEBUG_INFO_PIPE_FILE"}, "");
|
| 177 |
+
if (fileStem.empty() ||
|
| 178 |
+
getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0) <= 0) {
|
| 179 |
+
return;
|
| 180 |
+
}
|
| 181 |
+
TORCH_CHECK(!fileStem.empty(), "TORCH_NCCL_DEBUG_INFO_TEMP_FILE is empty");
|
| 182 |
+
std::string filename = c10::str(fileStem, rank, ".pipe");
|
| 183 |
+
TORCH_CHECK(
|
| 184 |
+
unlink(filename.c_str()) != -1 || errno == ENOENT,
|
| 185 |
+
"Error removing existing named pipe ",
|
| 186 |
+
filename);
|
| 187 |
+
TORCH_CHECK(
|
| 188 |
+
mkfifo(filename.c_str(), 0666) != -1,
|
| 189 |
+
"Error creating named pipe ",
|
| 190 |
+
filename);
|
| 191 |
+
fd_ = open(filename.c_str(), O_RDONLY | O_NONBLOCK);
|
| 192 |
+
LOG(INFO) << "Pipe file " << filename
|
| 193 |
+
<< " has been opened, write to it to trigger NCCL Debug Dump.";
|
| 194 |
+
TORCH_CHECK(fd_ != -1, "Error opening named pipe ", filename);
|
| 195 |
+
}
|
| 196 |
+
bool shouldDump() {
|
| 197 |
+
if (fd_ == -1) {
|
| 198 |
+
return false;
|
| 199 |
+
}
|
| 200 |
+
char buf[128];
|
| 201 |
+
// non-blocking from O_NONBLOCK above.
|
| 202 |
+
// Ignore EINTR because we already will poll this
|
| 203 |
+
// again later.
|
| 204 |
+
ssize_t bytesRead = read(fd_, &buf, 128);
|
| 205 |
+
return bytesRead > 0;
|
| 206 |
+
}
|
| 207 |
+
~DumpPipe() {
|
| 208 |
+
if (fd_ != -1) {
|
| 209 |
+
close(fd_);
|
| 210 |
+
}
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
private:
|
| 214 |
+
int fd_ = -1;
|
| 215 |
+
};
|
| 216 |
+
#else
|
| 217 |
+
struct DumpPipe {
|
| 218 |
+
DumpPipe(int rank) {}
|
| 219 |
+
bool shouldDump() {
|
| 220 |
+
return false;
|
| 221 |
+
}
|
| 222 |
+
};
|
| 223 |
+
#endif
|
| 224 |
+
|
| 225 |
+
// ProcessGroupNCCL implements NCCL bindings for c10d.
|
| 226 |
+
//
|
| 227 |
+
// All functions of the class are expected to be called in the same order
|
| 228 |
+
// across all processes in the process group. This is the only way that we
|
| 229 |
+
// can guarantee to match up the same calls among all processes.
|
| 230 |
+
//
|
| 231 |
+
// All NCCL functions provided by this class are asynchronous functions. More
|
| 232 |
+
// specifically, each NCCL call is scheduled on a separate CUDA stream that is
|
| 233 |
+
// different from the current CUDA stream. This is for the purpose of
|
| 234 |
+
// potentially achieving concurrency and better performance. As a result,
|
| 235 |
+
// it is the callers' responsibility to make sure that the CUDA stream their
|
| 236 |
+
// code works on needs to wait for the NCCL operation from
|
| 237 |
+
// this class.
|
| 238 |
+
//
|
| 239 |
+
// This can be done by calling:
|
| 240 |
+
//
|
| 241 |
+
// either WorkNCCL::wait() or WorkNCCL::synchronize(), both achieves the same
|
| 242 |
+
// functionality and are synonyms.
|
| 243 |
+
//
|
| 244 |
+
// Also note that WorkNCCL::finishedGPUExecution() is a helper function only
|
| 245 |
+
// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has
|
| 246 |
+
// finished execution on the GPU (not just scheduled).
|
| 247 |
+
//
|
| 248 |
+
// Example on using the NCCL process group
|
| 249 |
+
//
|
| 250 |
+
// ProcessGroupNCCL pg(store, rank, size);
|
| 251 |
+
// std::shared_ptr<WorkNCCL> work = pg.allreduce(tensors);
|
| 252 |
+
//
|
| 253 |
+
// // At this point, the NCCL kernel has already been queued successfully
|
| 254 |
+
// // Now, let current stream wait for the NCCL to finish, this function is
|
| 255 |
+
// // async operation as well
|
| 256 |
+
//
|
| 257 |
+
// work->wait()
|
| 258 |
+
//
|
| 259 |
+
// // Now continue on other work in the current stream.
|
| 260 |
+
class TORCH_API ProcessGroupNCCL : public Backend {
|
| 261 |
+
public:
|
| 262 |
+
class WorkNCCL : public Work, public std::enable_shared_from_this<WorkNCCL> {
|
| 263 |
+
public:
|
| 264 |
+
friend struct WorkInfo;
|
| 265 |
+
|
| 266 |
+
// Constructor takes a list of CUDA devices
|
| 267 |
+
WorkNCCL(
|
| 268 |
+
const std::string& pgUID,
|
| 269 |
+
const std::string& pgDesc,
|
| 270 |
+
at::Device& device,
|
| 271 |
+
int rank,
|
| 272 |
+
OpType opType,
|
| 273 |
+
uint64_t seq,
|
| 274 |
+
const char* profilingTitle = nullptr,
|
| 275 |
+
const std::optional<std::vector<at::Tensor>>& inputs = std::nullopt,
|
| 276 |
+
bool desyncDebug = false,
|
| 277 |
+
bool enableTiming = false,
|
| 278 |
+
bool cudaEventCacheEnabled = false,
|
| 279 |
+
DebugLevel distDebugLevel = DebugLevel::Off);
|
| 280 |
+
// Copy constructor doing partial copy without outputs_. Cleanup thread
|
| 281 |
+
// monitors and removes finished works. However it will deadlock when
|
| 282 |
+
// destructs outputs_ tensors who are view tensors in autograd graph.
|
| 283 |
+
WorkNCCL(const WorkNCCL& w);
|
| 284 |
+
|
| 285 |
+
~WorkNCCL() override;
|
| 286 |
+
|
| 287 |
+
// Checks if the NCCL kernel has started to execute.
|
| 288 |
+
bool isStarted();
|
| 289 |
+
|
| 290 |
+
// Checks if request has completed. In this specific case of NCCL, it checks
|
| 291 |
+
// if the NCCL operation has completed on the GPU in its own NCCL stream.
|
| 292 |
+
// Non-blocking operation.
|
| 293 |
+
bool isCompleted() override;
|
| 294 |
+
|
| 295 |
+
bool isSuccess() const override;
|
| 296 |
+
|
| 297 |
+
// Same as calling synchronize() for NCCL work.
|
| 298 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 299 |
+
|
| 300 |
+
void abort() override;
|
| 301 |
+
|
| 302 |
+
// Let current stream wait on the completing of the NCCL work
|
| 303 |
+
// Throws on exceptions. Blocking operation, which will wait for work
|
| 304 |
+
// completion.
|
| 305 |
+
void synchronize() override;
|
| 306 |
+
|
| 307 |
+
// Synchronize streams by blocking each on the NCCL stream
|
| 308 |
+
void synchronizeStream();
|
| 309 |
+
|
| 310 |
+
// Helper function to handle exception (throw if needed).
|
| 311 |
+
void handleException(ErrorHandlingMode asyncErrorHandling);
|
| 312 |
+
|
| 313 |
+
// Helper function that checks if the NCCL kernels have finished
|
| 314 |
+
// execution on the GPUs
|
| 315 |
+
bool finishedGPUExecution();
|
| 316 |
+
|
| 317 |
+
// Get a Future object that will be marked as completed internally.
|
| 318 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 319 |
+
|
| 320 |
+
float getDuration() const override;
|
| 321 |
+
|
| 322 |
+
uint64_t getSequencenumber() const override;
|
| 323 |
+
|
| 324 |
+
const std::string& logPrefix() const;
|
| 325 |
+
|
| 326 |
+
// Helper function that sets an exception_ptr on the WorkNCCL object.
|
| 327 |
+
void setException(std::exception_ptr exception_ptr);
|
| 328 |
+
|
| 329 |
+
// Helper function that returns True if the WorkNCCL object has timed out
|
| 330 |
+
// and False otherwise.
|
| 331 |
+
// In case of timeout, set exception on the WorkNCCL object.
|
| 332 |
+
bool checkTimeout(
|
| 333 |
+
std::optional<std::chrono::milliseconds> timeout = std::nullopt);
|
| 334 |
+
|
| 335 |
+
std::vector<at::Tensor> result() override;
|
| 336 |
+
|
| 337 |
+
protected:
|
| 338 |
+
// The process group unique id
|
| 339 |
+
std::string pgUID_;
|
| 340 |
+
|
| 341 |
+
// The process group description
|
| 342 |
+
std::string pgDesc_;
|
| 343 |
+
|
| 344 |
+
// The cached list of CUDA devices to operate on
|
| 345 |
+
at::Device device_;
|
| 346 |
+
|
| 347 |
+
// The start CUDA event of NCCL operator tracking this work item. These
|
| 348 |
+
// start CUDA events are needed by desync debugging if enabled.
|
| 349 |
+
std::shared_ptr<at::cuda::CUDAEvent> ncclStartEvent_;
|
| 350 |
+
|
| 351 |
+
// The end CUDA event of NCCL operator tracking this work item.
|
| 352 |
+
std::shared_ptr<at::cuda::CUDAEvent> ncclEndEvent_;
|
| 353 |
+
|
| 354 |
+
// The NCCL communicator used for this work item.
|
| 355 |
+
std::shared_ptr<NCCLComm> ncclComm_;
|
| 356 |
+
|
| 357 |
+
// Tensors used for barrier op
|
| 358 |
+
at::Tensor barrierTensor_;
|
| 359 |
+
|
| 360 |
+
// Clone of blockingWait_ from ProcessGroupNCCL.
|
| 361 |
+
bool blockingWait_ = false;
|
| 362 |
+
|
| 363 |
+
// Clone of avoidRecordStreams_ from ProcessGroupNCCL.
|
| 364 |
+
bool avoidRecordStreams_ = false;
|
| 365 |
+
|
| 366 |
+
// Clone of opTimeout_ from ProcessGroupNCCL.
|
| 367 |
+
std::chrono::milliseconds opTimeout_;
|
| 368 |
+
|
| 369 |
+
// Ephemeral timeouts are owned by exactly one work,
|
| 370 |
+
// and reset after that work completes.
|
| 371 |
+
// There may be more than one ephemeral timeout active at the same time,
|
| 372 |
+
// and this variable is used to track the ownership of ephemeral timeout.
|
| 373 |
+
std::chrono::milliseconds ownedEphermeralTimeout_ =
|
| 374 |
+
std::chrono::milliseconds(0);
|
| 375 |
+
|
| 376 |
+
// Time point representing when the work started.
|
| 377 |
+
std::chrono::time_point<std::chrono::steady_clock> workStartTime_;
|
| 378 |
+
|
| 379 |
+
// Record the collective sequential number.
|
| 380 |
+
uint64_t seq_;
|
| 381 |
+
|
| 382 |
+
// Indicates if the nccl start event has been updated to the store trace.
|
| 383 |
+
// This will be used by desync debug.
|
| 384 |
+
bool startTraceUpdated_{false};
|
| 385 |
+
|
| 386 |
+
// Record collective sizes for debug. We only record the size on the first
|
| 387 |
+
// device as multi-device per process is deprecated
|
| 388 |
+
size_t numelIn_ = -1;
|
| 389 |
+
size_t numelOut_ = -1;
|
| 390 |
+
|
| 391 |
+
// Wrapper method for the static checkForNCCLErrors which can be overridden
|
| 392 |
+
// for tests.
|
| 393 |
+
virtual std::exception_ptr checkForNCCLErrors();
|
| 394 |
+
|
| 395 |
+
friend std::ostream& operator<<(
|
| 396 |
+
std::ostream& output,
|
| 397 |
+
const WorkNCCL& workNCCL);
|
| 398 |
+
|
| 399 |
+
private:
|
| 400 |
+
// Helper function for synchronize
|
| 401 |
+
void synchronizeInternal(std::chrono::milliseconds timeout);
|
| 402 |
+
|
| 403 |
+
// Checks for NCCL errors and sets an appropriate exception_ptr.
|
| 404 |
+
void checkAndSetException();
|
| 405 |
+
|
| 406 |
+
// Just checks whether GPU execution has started, without modifying
|
| 407 |
+
// exception_ptr.
|
| 408 |
+
bool startedGPUExecutionInternal() const;
|
| 409 |
+
|
| 410 |
+
// Just checks whether GPU execution has completed, without modifying
|
| 411 |
+
// exception_ptr.
|
| 412 |
+
bool finishedGPUExecutionInternal() const;
|
| 413 |
+
|
| 414 |
+
// Reference to the store so that we can write aborted communicators
|
| 415 |
+
// to the store.
|
| 416 |
+
c10::intrusive_ptr<Store> store_;
|
| 417 |
+
|
| 418 |
+
// Store a reference to NCCL collective's outputs, used by result and to
|
| 419 |
+
// give a more descriptive message when representing the Work as a string.
|
| 420 |
+
std::shared_ptr<std::vector<at::Tensor>> outputs_;
|
| 421 |
+
|
| 422 |
+
// TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper.
|
| 423 |
+
// Stores references to participating non-output tensors (ie inputs,
|
| 424 |
+
// flattened intermediates).
|
| 425 |
+
// We'll clear this list in synchronizeStream, just after user-facing
|
| 426 |
+
// stream(s) are synced with the nccl work stream(s).
|
| 427 |
+
// By keeping these refs (as well as outputs_) alive until after the
|
| 428 |
+
// collective's work rejoins the user-facing streams, we achieve
|
| 429 |
+
// caching allocator safety without any recordStream calls.
|
| 430 |
+
// For in-place collectives, some refs stashed here may alias outputs_,
|
| 431 |
+
// but that doesn't do any harm.
|
| 432 |
+
std::shared_ptr<std::vector<at::Tensor>> stashed_for_allocator_safety_;
|
| 433 |
+
|
| 434 |
+
// The future returned by getFuture.
|
| 435 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 436 |
+
|
| 437 |
+
bool timingEnabled_;
|
| 438 |
+
// unique id used to tell the trace buffer that this
|
| 439 |
+
// work has completed
|
| 440 |
+
std::optional<uint64_t> trace_id_;
|
| 441 |
+
DebugLevel distDebugLevel_;
|
| 442 |
+
friend class ProcessGroupNCCL;
|
| 443 |
+
};
|
| 444 |
+
|
| 445 |
+
class CUDAEventCache {
|
| 446 |
+
public:
|
| 447 |
+
CUDAEventCache();
|
| 448 |
+
std::shared_ptr<at::cuda::CUDAEvent> create(bool timing);
|
| 449 |
+
static CUDAEventCache& get();
|
| 450 |
+
|
| 451 |
+
private:
|
| 452 |
+
std::mutex cacheMutex_;
|
| 453 |
+
// NOTE: We intentionaly store raw pointers so that
|
| 454 |
+
// we do not attempt to destroy the event objects on process exit,
|
| 455 |
+
// because cuda may be gone.
|
| 456 |
+
std::vector<at::cuda::CUDAEvent*>
|
| 457 |
+
eventsArray_[2]; // 0 for timing=false, 1 for timing=true
|
| 458 |
+
};
|
| 459 |
+
|
| 460 |
+
struct Options : Backend::Options {
|
| 461 |
+
// NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for
|
| 462 |
+
// operations. This is only used when blockingWait_ is enabled.
|
| 463 |
+
explicit Options(bool is_high_priority_stream = false);
|
| 464 |
+
|
| 465 |
+
// return intrusive_ptr of the object
|
| 466 |
+
static c10::intrusive_ptr<Options> create(
|
| 467 |
+
bool is_high_priority_stream = false) {
|
| 468 |
+
return c10::make_intrusive<Options>(is_high_priority_stream);
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
// Schedule NCCL operations on high priority CUDA streams
|
| 472 |
+
bool is_high_priority_stream;
|
| 473 |
+
|
| 474 |
+
#ifdef NCCL_HAS_COMM_NONBLOCKING
|
| 475 |
+
// Configure ranks
|
| 476 |
+
ncclConfig_t config = NCCL_CONFIG_INITIALIZER;
|
| 477 |
+
#endif
|
| 478 |
+
|
| 479 |
+
// Optional "parent" backend and color to create communicators from
|
| 480 |
+
// via `ncclCommSplit`
|
| 481 |
+
std::shared_ptr<ProcessGroupNCCL> split_from;
|
| 482 |
+
int64_t split_color{0};
|
| 483 |
+
std::vector<uint64_t> global_ranks_in_group;
|
| 484 |
+
std::string group_name;
|
| 485 |
+
};
|
| 486 |
+
|
| 487 |
+
// If you wish to create multiple process groups, each with a potentially
|
| 488 |
+
// different rank and size, you can do so by passing a new store instance
|
| 489 |
+
// to each one. If you have only a single store object, you can
|
| 490 |
+
// use the `c10d::PrefixStore` to derive scoped instances.
|
| 491 |
+
// This is also what the Python API in torch.distributed does.
|
| 492 |
+
//
|
| 493 |
+
// The process group instance keeps a reference to the store because
|
| 494 |
+
// it may be used long after the constructor runs. In fact, the constructor
|
| 495 |
+
// doesn't create any NCCL communicators. A single NCCL communicator can
|
| 496 |
+
// only be used on a specific set of devices, and are therefore created
|
| 497 |
+
// on-demand when a collective runs. If another collective is executed later,
|
| 498 |
+
// against a different set of devices, the process group creates another NCCL
|
| 499 |
+
// communicator. These NCCL communicators are cached and reused if possible.
|
| 500 |
+
//
|
| 501 |
+
ProcessGroupNCCL(
|
| 502 |
+
const c10::intrusive_ptr<Store>& store,
|
| 503 |
+
int rank,
|
| 504 |
+
int size,
|
| 505 |
+
c10::intrusive_ptr<Options> options = Options::create());
|
| 506 |
+
|
| 507 |
+
// This constructor includes the deprecated `groupName` argument.
|
| 508 |
+
// If you have existing code that uses the `groupName`, you can replace
|
| 509 |
+
// it by specifying a `c10d::PrefixStore(groupName, store)` for store.
|
| 510 |
+
C10_DEPRECATED ProcessGroupNCCL(
|
| 511 |
+
const c10::intrusive_ptr<Store>& store,
|
| 512 |
+
int rank,
|
| 513 |
+
int size,
|
| 514 |
+
const std::string& groupName,
|
| 515 |
+
c10::intrusive_ptr<Options> options = Options::create())
|
| 516 |
+
: ProcessGroupNCCL(store, rank, size, options) {}
|
| 517 |
+
|
| 518 |
+
~ProcessGroupNCCL() override;
|
| 519 |
+
|
| 520 |
+
// This function returns a local uid for ProcessGroupNCCL.
|
| 521 |
+
uint64_t getUid() {
|
| 522 |
+
return static_cast<uint64_t>(local_id_);
|
| 523 |
+
}
|
| 524 |
+
|
| 525 |
+
c10::intrusive_ptr<Options> getOptions() {
|
| 526 |
+
return options_;
|
| 527 |
+
}
|
| 528 |
+
|
| 529 |
+
const std::string getBackendName() const override {
|
| 530 |
+
return std::string(NCCL_BACKEND_NAME);
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
bool supportsSplitting() const override {
|
| 534 |
+
return true;
|
| 535 |
+
}
|
| 536 |
+
|
| 537 |
+
void startCoalescing() override;
|
| 538 |
+
|
| 539 |
+
c10::intrusive_ptr<Work> endCoalescing() override;
|
| 540 |
+
|
| 541 |
+
// For specifying a composite optype, such as ALLGATHER and REDUCE_SCATTER
|
| 542 |
+
c10::intrusive_ptr<Work> endCoalescing(OpType optype);
|
| 543 |
+
|
| 544 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 545 |
+
std::vector<at::Tensor>& tensors,
|
| 546 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 547 |
+
|
| 548 |
+
c10::intrusive_ptr<Work> _broadcast_oop(
|
| 549 |
+
at::Tensor& outputTensors,
|
| 550 |
+
at::Tensor& inputTensors,
|
| 551 |
+
const BroadcastOptions& opts = BroadcastOptions());
|
| 552 |
+
|
| 553 |
+
c10::intrusive_ptr<Work> allreduce_sparse(
|
| 554 |
+
std::vector<at::Tensor>& tensors,
|
| 555 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 556 |
+
|
| 557 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 558 |
+
std::vector<at::Tensor>& tensors,
|
| 559 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 560 |
+
|
| 561 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 562 |
+
std::vector<at::Tensor>& tensors,
|
| 563 |
+
const AllreduceCoalescedOptions& opts =
|
| 564 |
+
AllreduceCoalescedOptions()) override;
|
| 565 |
+
|
| 566 |
+
c10::intrusive_ptr<Work> reduce(
|
| 567 |
+
std::vector<at::Tensor>& tensors,
|
| 568 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 569 |
+
|
| 570 |
+
c10::intrusive_ptr<Work> _reduce_oop(
|
| 571 |
+
at::Tensor& outputTensors,
|
| 572 |
+
at::Tensor& inputTensors,
|
| 573 |
+
const ReduceOptions& opts = ReduceOptions());
|
| 574 |
+
|
| 575 |
+
c10::intrusive_ptr<Work> allgather(
|
| 576 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 577 |
+
std::vector<at::Tensor>& inputTensors,
|
| 578 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 579 |
+
|
| 580 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 581 |
+
at::Tensor& outputbuffer,
|
| 582 |
+
at::Tensor& inputbuffer,
|
| 583 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 584 |
+
|
| 585 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 586 |
+
std::vector<std::vector<at::Tensor>>& outputTensorLists,
|
| 587 |
+
std::vector<at::Tensor>& inputTensors,
|
| 588 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 589 |
+
|
| 590 |
+
c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 591 |
+
std::vector<at::Tensor>& outputs,
|
| 592 |
+
std::vector<at::Tensor>& inputs,
|
| 593 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 594 |
+
|
| 595 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 596 |
+
std::vector<at::Tensor>& outputTensors,
|
| 597 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 598 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 599 |
+
|
| 600 |
+
c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 601 |
+
at::Tensor& outputTensor,
|
| 602 |
+
at::Tensor& inputTensor,
|
| 603 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 604 |
+
|
| 605 |
+
c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 606 |
+
std::vector<at::Tensor>& outputs,
|
| 607 |
+
std::vector<at::Tensor>& inputs,
|
| 608 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 609 |
+
|
| 610 |
+
c10::intrusive_ptr<Work> barrier(
|
| 611 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 612 |
+
|
| 613 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 614 |
+
at::Tensor& outputTensor,
|
| 615 |
+
at::Tensor& inputTensor,
|
| 616 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 617 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 618 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 619 |
+
|
| 620 |
+
c10::intrusive_ptr<Work> alltoall(
|
| 621 |
+
std::vector<at::Tensor>& outputTensors,
|
| 622 |
+
std::vector<at::Tensor>& inputTensors,
|
| 623 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 624 |
+
|
| 625 |
+
c10::intrusive_ptr<Work> send(
|
| 626 |
+
std::vector<at::Tensor>& tensors,
|
| 627 |
+
int dstRank,
|
| 628 |
+
int tag) override;
|
| 629 |
+
|
| 630 |
+
c10::intrusive_ptr<Work> recv(
|
| 631 |
+
std::vector<at::Tensor>& tensors,
|
| 632 |
+
int srcRank,
|
| 633 |
+
int tag) override;
|
| 634 |
+
|
| 635 |
+
void groupStart();
|
| 636 |
+
|
| 637 |
+
void groupEnd();
|
| 638 |
+
|
| 639 |
+
void groupEndNonblocking(std::shared_ptr<NCCLComm> comm);
|
| 640 |
+
|
| 641 |
+
c10::intrusive_ptr<Work> gather(
|
| 642 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 643 |
+
std::vector<at::Tensor>& inputTensors,
|
| 644 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 645 |
+
|
| 646 |
+
c10::intrusive_ptr<Work> scatter(
|
| 647 |
+
std::vector<at::Tensor>& outputTensors,
|
| 648 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 649 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 650 |
+
|
| 651 |
+
// Unsupported Ops
|
| 652 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 653 |
+
std::vector<at::Tensor>& tensors,
|
| 654 |
+
int tag) override;
|
| 655 |
+
|
| 656 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 657 |
+
// create it and broadcast it to other ranks using the store.
|
| 658 |
+
void setSequenceNumberForGroup() override;
|
| 659 |
+
|
| 660 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 661 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 662 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 663 |
+
uint64_t getSequenceNumberForGroup() override;
|
| 664 |
+
|
| 665 |
+
// Return the total number of splits the communicators held by this process
|
| 666 |
+
// group have performed. Counts ncclCommCreateFromRanks() for ncclx v2.21.5+
|
| 667 |
+
uint64_t getCommSplitCounter() const;
|
| 668 |
+
|
| 669 |
+
void registerOnCompletionHook(
|
| 670 |
+
std::function<void(std::shared_ptr<WorkInfo>)>&& hook) override;
|
| 671 |
+
void waitForPendingWorks() override;
|
| 672 |
+
|
| 673 |
+
void enableCollectivesTiming() override;
|
| 674 |
+
|
| 675 |
+
// Helper function for iteratively aborting communicators in the provided map
|
| 676 |
+
void abortCommsFromMap(
|
| 677 |
+
std::unordered_map<std::string, std::shared_ptr<NCCLComm>>& ncclCommsMap,
|
| 678 |
+
std::optional<std::string> abortReason);
|
| 679 |
+
|
| 680 |
+
c10::intrusive_ptr<intra_node_comm::IntraNodeComm> initIntraNodeComm();
|
| 681 |
+
|
| 682 |
+
// Provides an API to abort the ProcessGroup (similar to ncclCommAbort)
|
| 683 |
+
// instead of relying on ProcessGroupNCCL destructor.
|
| 684 |
+
// return true if abort is successful, otherwise false
|
| 685 |
+
bool abort(std::optional<std::string> abortReason = std::nullopt);
|
| 686 |
+
|
| 687 |
+
void shutdown(std::optional<std::string> reason = std::nullopt);
|
| 688 |
+
|
| 689 |
+
void eagerConnectSingleDevice(at::Device device) override;
|
| 690 |
+
|
| 691 |
+
void performNocolorSplit(at::Device device);
|
| 692 |
+
|
| 693 |
+
// This method adds a temporary extension for the timeout period,
|
| 694 |
+
// applying to all collectives between the calling of this API and
|
| 695 |
+
// the completion of the first collective on the GPU. While this feature
|
| 696 |
+
// provides flexibility in specific scenarios, it introduces statefulness
|
| 697 |
+
// to timeout setting. Therefore, it is advisable to use this API sparingly
|
| 698 |
+
// and consider alternative approaches, such as directly setting the timeout
|
| 699 |
+
// or utilizing a barrier collective (one can set any timeout to the barrier),
|
| 700 |
+
// whenever feasible.
|
| 701 |
+
void addEphemeralTimeout(const std::chrono::milliseconds& timeout);
|
| 702 |
+
|
| 703 |
+
// This function is only intended for testing purposes because we don't
|
| 704 |
+
// want to expose the `WorkNCCL` via pybind. It verifies whether the
|
| 705 |
+
// `opTimeout_` of the provided WorkNCCL instance is the same as the specified
|
| 706 |
+
// timeout.
|
| 707 |
+
bool verifyWorkTimeoutForTest(
|
| 708 |
+
const c10::intrusive_ptr<Work> work,
|
| 709 |
+
const std::chrono::milliseconds& timeout);
|
| 710 |
+
|
| 711 |
+
protected:
|
| 712 |
+
// Helper that broadcasts nccl unique ID to all ranks through the store
|
| 713 |
+
void broadcastUniqueNCCLID(
|
| 714 |
+
ncclUniqueId* ncclID,
|
| 715 |
+
bool isSingleP2POp,
|
| 716 |
+
const std::string& devicesKey,
|
| 717 |
+
int p2pRank);
|
| 718 |
+
|
| 719 |
+
// Helper that either looks up the cached NCCL communicators or creates
|
| 720 |
+
// a new set of NCCL communicators as a cache entry
|
| 721 |
+
std::shared_ptr<NCCLComm> getNCCLComm(
|
| 722 |
+
const std::string& deviceKey,
|
| 723 |
+
at::Device& device,
|
| 724 |
+
OpType opType,
|
| 725 |
+
int p2pRank = 0,
|
| 726 |
+
bool isSendRecvSelf = false);
|
| 727 |
+
|
| 728 |
+
// Wrapper method which can be overridden for tests.
|
| 729 |
+
virtual std::exception_ptr checkForNCCLErrors(
|
| 730 |
+
std::shared_ptr<NCCLComm>& ncclComm);
|
| 731 |
+
|
| 732 |
+
// Ensure thaht if record is True, the work obj will be enqueued via
|
| 733 |
+
// workEnqueue
|
| 734 |
+
virtual c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL> initWork(
|
| 735 |
+
at::Device& device,
|
| 736 |
+
int rank,
|
| 737 |
+
OpType opType,
|
| 738 |
+
const char* profilingTitle = nullptr,
|
| 739 |
+
const std::vector<at::Tensor>& inputs = {},
|
| 740 |
+
const std::vector<at::Tensor>& outputs = {},
|
| 741 |
+
bool record = false);
|
| 742 |
+
|
| 743 |
+
// In the timeout case and we will dump debug info such as the NCCL flight
|
| 744 |
+
// recorder to storage. Down the road, if we have more complicated or blocking
|
| 745 |
+
// operations, we might need to use a side thread to do it.
|
| 746 |
+
bool dumpDebuggingInfo();
|
| 747 |
+
|
| 748 |
+
private:
|
| 749 |
+
int globalRankStart;
|
| 750 |
+
int globalRankStride;
|
| 751 |
+
|
| 752 |
+
// Helper that encapsulates work shared across all collective communication
|
| 753 |
+
// primitives. The callbacks have the following signatures:
|
| 754 |
+
//
|
| 755 |
+
// ncclResult_t fn(at::Tensor& input, at::Tensor& output,
|
| 756 |
+
// ncclComm_t, at::cuda::CUDAStream&);
|
| 757 |
+
// void {pre,post}(std::vector<at::cuda::CUDAStream&>);
|
| 758 |
+
template <typename Fn>
|
| 759 |
+
c10::intrusive_ptr<Work> collective(
|
| 760 |
+
at::Tensor& input,
|
| 761 |
+
at::Tensor& output,
|
| 762 |
+
Fn fn,
|
| 763 |
+
OpType opType,
|
| 764 |
+
const char* profilingTitle = nullptr,
|
| 765 |
+
bool avoidRecordStreams = false,
|
| 766 |
+
bool nanCheck = true);
|
| 767 |
+
|
| 768 |
+
template <typename Fn, typename PreProcess, typename PostProcess>
|
| 769 |
+
c10::intrusive_ptr<Work> collective(
|
| 770 |
+
at::Tensor& input,
|
| 771 |
+
at::Tensor& output,
|
| 772 |
+
Fn fn,
|
| 773 |
+
PreProcess pre,
|
| 774 |
+
PostProcess post,
|
| 775 |
+
OpType opType,
|
| 776 |
+
const char* profilingTitle = nullptr,
|
| 777 |
+
bool avoidRecordStreams = false,
|
| 778 |
+
bool nanCheck = true);
|
| 779 |
+
|
| 780 |
+
template <typename Fn, typename PreProcess, typename PostProcess>
|
| 781 |
+
c10::intrusive_ptr<Work> collective(
|
| 782 |
+
std::vector<at::Tensor>& inputs,
|
| 783 |
+
std::vector<at::Tensor>& outputs,
|
| 784 |
+
Fn fn,
|
| 785 |
+
PreProcess pre,
|
| 786 |
+
PostProcess post,
|
| 787 |
+
OpType opType,
|
| 788 |
+
const char* profilingTitle = nullptr,
|
| 789 |
+
bool avoidRecordStreams = false,
|
| 790 |
+
bool nanCheck = true);
|
| 791 |
+
|
| 792 |
+
template <typename Fn>
|
| 793 |
+
c10::intrusive_ptr<Work> collectiveCoalesced(
|
| 794 |
+
std::vector<at::Tensor>& input,
|
| 795 |
+
std::vector<at::Tensor>& output,
|
| 796 |
+
Fn fn,
|
| 797 |
+
OpType opType,
|
| 798 |
+
const char* profilingTitle = nullptr,
|
| 799 |
+
bool avoidRecordStreams = false);
|
| 800 |
+
|
| 801 |
+
// Helper that encapsulates work shared across point-to-point communication
|
| 802 |
+
// primitives. It is the same structure as the helper used for collective
|
| 803 |
+
// communication primitives.
|
| 804 |
+
template <typename Fn>
|
| 805 |
+
c10::intrusive_ptr<Work> pointToPoint(
|
| 806 |
+
at::Tensor& tensor,
|
| 807 |
+
Fn fn,
|
| 808 |
+
int peer,
|
| 809 |
+
OpType opType,
|
| 810 |
+
const char* profilingTitle = nullptr);
|
| 811 |
+
|
| 812 |
+
template <typename Fn, typename PreProcess, typename PostProcess>
|
| 813 |
+
c10::intrusive_ptr<Work> pointToPoint(
|
| 814 |
+
at::Tensor& tensor,
|
| 815 |
+
Fn fn,
|
| 816 |
+
int peer,
|
| 817 |
+
OpType opType,
|
| 818 |
+
PreProcess pre,
|
| 819 |
+
PostProcess post,
|
| 820 |
+
const char* profilingTitle);
|
| 821 |
+
|
| 822 |
+
c10::intrusive_ptr<Work> allreduce_impl(
|
| 823 |
+
at::Tensor& tensor,
|
| 824 |
+
const AllreduceOptions& opts = AllreduceOptions());
|
| 825 |
+
|
| 826 |
+
// Checks for NCCL errors on each of the communicators and returns an
|
| 827 |
+
// appropriate exception_ptr (nullptr if no errors).
|
| 828 |
+
static std::exception_ptr checkForNCCLErrorsInternal(
|
| 829 |
+
std::shared_ptr<NCCLComm>& ncclComm);
|
| 830 |
+
|
| 831 |
+
// Function that runs as part of a separate thread and checks for errors on
|
| 832 |
+
// NCCL communicators. We need a separate thread to check for NCCL errors
|
| 833 |
+
// since we can't rely on the user calling certain methods like wait(),
|
| 834 |
+
// isCompleted() etc. to detect and remediate errors. In addition to this, we
|
| 835 |
+
// need a mechanism to safely abort and remove NCCL communicators from our
|
| 836 |
+
// cache. This can be done cleanly by having a thread for the ProcessGroupNCCL
|
| 837 |
+
// class. Attempting to modify the communicator cache from the WorkNCCL class
|
| 838 |
+
// might run into issues with object lifetime since the ProcessGroupNCCL
|
| 839 |
+
// object might get destroyed before the WorkNCCL object.
|
| 840 |
+
void ncclCommWatchdog();
|
| 841 |
+
|
| 842 |
+
// Return the CUDA device most likely associated with this backend.
|
| 843 |
+
// If we aren't bound to a specific device, there is no strict
|
| 844 |
+
// guarantee that this heuristic is the correct assignment of ranks
|
| 845 |
+
// to GPUs that Python layers use, but in practice it tends to be.
|
| 846 |
+
// Fortunately we don't rely on this for correctness of any tensor
|
| 847 |
+
// operations, just for ancillary uses like barriers.
|
| 848 |
+
at::Device guessDeviceForRank() const;
|
| 849 |
+
|
| 850 |
+
// Destroys initialized NCCL communicators in devNCCLComMap_ given by input
|
| 851 |
+
// key. Throws if there are no communicators to destroy. Also removes
|
| 852 |
+
// communicators from the cache and clears used device indices.
|
| 853 |
+
void destroyNCCLComms(const std::string& devNCCLCommMapKey);
|
| 854 |
+
|
| 855 |
+
// Watchdog's inside loop.
|
| 856 |
+
// Takes care of cleaning up completed work, and aborting upon failure or
|
| 857 |
+
// timeout.
|
| 858 |
+
void watchdogHandler();
|
| 859 |
+
|
| 860 |
+
void runHookLoop();
|
| 861 |
+
|
| 862 |
+
// Desync debug helper
|
| 863 |
+
void logWorkStart(WorkNCCL& work);
|
| 864 |
+
|
| 865 |
+
// Desync debug helper
|
| 866 |
+
void logWorkEnd(WorkNCCL& work);
|
| 867 |
+
|
| 868 |
+
// Generates a prefix that is unique to this process group and rank, for
|
| 869 |
+
// disambiguating logs
|
| 870 |
+
std::string createLogPrefix() const;
|
| 871 |
+
|
| 872 |
+
// Returns the unique prefix created in createLogPrefix
|
| 873 |
+
const std::string& logPrefix() const;
|
| 874 |
+
|
| 875 |
+
// Returns the global rank of the device. This function assumes that users
|
| 876 |
+
// always create a default global process group(PG) which includes all
|
| 877 |
+
// devices. It is called in the constructor of ProcessGroupNCCL, so it always
|
| 878 |
+
// return the rank_ of the the very first PG created, aka, default global PG.
|
| 879 |
+
const int& globalRank() const;
|
| 880 |
+
|
| 881 |
+
// Returns the global ranks of a PG.
|
| 882 |
+
const std::vector<uint64_t>& groupRanks() const;
|
| 883 |
+
|
| 884 |
+
// Util function to assign timeout to each work.
|
| 885 |
+
void assignTimeoutToWork(
|
| 886 |
+
const c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL>& work,
|
| 887 |
+
const c10::intrusive_ptr<Options>& option);
|
| 888 |
+
|
| 889 |
+
protected:
|
| 890 |
+
// Function that runs as part of a separate thread aside from watchdog
|
| 891 |
+
// thread because we need to check the heartbeat from watchdog thread
|
| 892 |
+
// so that when we get stuck in some NCCL/CUDA calls,
|
| 893 |
+
// we can dump the debugging information and abort the process.
|
| 894 |
+
virtual void heartbeatMonitor();
|
| 895 |
+
|
| 896 |
+
// Function that directly trigger std::abort so that the whole process
|
| 897 |
+
// gets terminated.
|
| 898 |
+
virtual void terminateProcess(std::string errMsg);
|
| 899 |
+
|
| 900 |
+
// A helper function to wait for a future to complete or timeout.
|
| 901 |
+
void waitForFutureOrTimeout(
|
| 902 |
+
std::future<bool>& fut,
|
| 903 |
+
const std::chrono::milliseconds& timeOutMilSec,
|
| 904 |
+
const std::string& futDescription,
|
| 905 |
+
bool throwException = false,
|
| 906 |
+
bool log = false);
|
| 907 |
+
|
| 908 |
+
// When watchdog timeout, this function will be called and return debug info
|
| 909 |
+
// for users. For now we only get information from retrieveDesyncReport.
|
| 910 |
+
// We are working on enabling more useful debug information for watchdog
|
| 911 |
+
// timeout.
|
| 912 |
+
virtual std::string getNCCLWatchdogDebugInfo();
|
| 913 |
+
|
| 914 |
+
std::string getNCCLWatchdogTimeoutErrorMsg(const std::string& extraMsg);
|
| 915 |
+
|
| 916 |
+
std::string getNCCLWatchdogTimeoutExitMsg(const std::string& exitReason);
|
| 917 |
+
|
| 918 |
+
static const int64_t kWatchdogThreadSleepMillis;
|
| 919 |
+
|
| 920 |
+
// The store is used to broadcast the NCCL unique ID of rank 0. This store
|
| 921 |
+
// comes with prefix and it is different across ProcessGroup NCCL instances
|
| 922 |
+
// (aka, different ProcessGroups).
|
| 923 |
+
c10::intrusive_ptr<Store> store_;
|
| 924 |
+
|
| 925 |
+
// Reference to the store without prefix so that keys are same across all
|
| 926 |
+
// ProcessGroup NCCL instances and (key, value) pairs written to the store are
|
| 927 |
+
// global.
|
| 928 |
+
c10::intrusive_ptr<Store> globalStore_;
|
| 929 |
+
|
| 930 |
+
bool storeError_{false};
|
| 931 |
+
|
| 932 |
+
// The lock which protects the write/read of
|
| 933 |
+
// ephemeralTimeoutActive_/ephemeralTimeoutInflight_.
|
| 934 |
+
// TODO(fduwjj): We need to have an audit on all mutexes we are adding here.
|
| 935 |
+
// And consolidate them if possible.
|
| 936 |
+
std::mutex mtxTimeoutExtension_;
|
| 937 |
+
|
| 938 |
+
// The ephemeral timeout added on top of existing timeout for works issued
|
| 939 |
+
// before first work finishes.
|
| 940 |
+
std::chrono::milliseconds ephemeralTimeoutActive_ =
|
| 941 |
+
std::chrono::milliseconds(0);
|
| 942 |
+
|
| 943 |
+
// The ephemeral timeout addition which has been already applied to work.
|
| 944 |
+
std::chrono::milliseconds ephemeralTimeoutInflight_ =
|
| 945 |
+
std::chrono::milliseconds(0);
|
| 946 |
+
|
| 947 |
+
const c10::intrusive_ptr<Options> options_;
|
| 948 |
+
|
| 949 |
+
// The number of NCCL communicators that have been created during
|
| 950 |
+
// the lifetime of this process group. This sequence number is
|
| 951 |
+
// used to scope keys used in the store.
|
| 952 |
+
uint64_t ncclCommCounter_{0};
|
| 953 |
+
|
| 954 |
+
// The store keys to trace the last NCCL collective kernel CUDA events - start
|
| 955 |
+
// event and end event respectively. These are used to do desync root cause
|
| 956 |
+
// analysis.
|
| 957 |
+
const std::string traceKeyStart_;
|
| 958 |
+
const std::string traceKeyEnd_;
|
| 959 |
+
|
| 960 |
+
// The NCCL communicator that the process group has cached.
|
| 961 |
+
//
|
| 962 |
+
// For collective operations:
|
| 963 |
+
// The key is a list of GPU devices that an operation is operating on
|
| 964 |
+
// The GPU devices are stored in a device sequence and the cache NCCL
|
| 965 |
+
// communicator is associated with this GPU device sequence
|
| 966 |
+
//
|
| 967 |
+
// e.g. If the process group op only uses device 0, then the value of
|
| 968 |
+
// the used device string stored (value of the hashmap) would be "0".
|
| 969 |
+
//
|
| 970 |
+
// If the process group op uses device 0 - 7 and the each tensor of the
|
| 971 |
+
// input tensor list is on device, 0, 1, 2, 3, 4, 5, 6, 7 separately,
|
| 972 |
+
// then the value of the used device string (key) stored would be
|
| 973 |
+
// "0,1,2,3,4,5,6,7"
|
| 974 |
+
//
|
| 975 |
+
// If the process group op uses device 0 - 7 and the each tensor of the
|
| 976 |
+
// input tensor list is on device, 0, 4, 5, 6, 7, 1, 2, 3 separately,
|
| 977 |
+
// then the value of the used device string stored would be
|
| 978 |
+
// "0,4,5,6,7,1,2,3"
|
| 979 |
+
//
|
| 980 |
+
// Note that the order of the device for the tensor list matters.
|
| 981 |
+
//
|
| 982 |
+
// For point-to-point operations:
|
| 983 |
+
// The key is a string of my current rank and the peer process rank.
|
| 984 |
+
// e.g. If process 1 and process 2 are involved in a point-to-point
|
| 985 |
+
// communication, the key will be "1:2" on both processes. Note: this is for
|
| 986 |
+
// the scenario where there is only 1 GPU per process. When it comes to
|
| 987 |
+
// multiple GPUs per process, this part may need to redesigned.
|
| 988 |
+
// TODO: we probably need a separte map for P2P comms
|
| 989 |
+
std::unordered_map<std::string, std::shared_ptr<NCCLComm>> devNCCLCommMap_;
|
| 990 |
+
|
| 991 |
+
// The NCCL communicators currently in process of being initialized.
|
| 992 |
+
std::unordered_map<std::string, std::shared_ptr<NCCLComm>>
|
| 993 |
+
inInitializationCommMap_;
|
| 994 |
+
|
| 995 |
+
// Mutex to guard maps like devNCCLCommMap_.
|
| 996 |
+
std::mutex mutex_;
|
| 997 |
+
|
| 998 |
+
// Heartbeat of watchdog thread.
|
| 999 |
+
std::atomic_uint64_t heartbeat_;
|
| 1000 |
+
|
| 1001 |
+
// The time interval used for deciding whether there is no watchdog heartbeat.
|
| 1002 |
+
int heartbeatTimeoutInSec_;
|
| 1003 |
+
|
| 1004 |
+
// timeout for the dump to finish.
|
| 1005 |
+
int waitTimeoutDumpInMilSec_;
|
| 1006 |
+
|
| 1007 |
+
// Interval of check coordinated signals in ProcessGroupNCCL from other ranks
|
| 1008 |
+
// e.g., trigger the dump of the debugging info for timeout when notified.
|
| 1009 |
+
int coordCheckIntervalMilSec_;
|
| 1010 |
+
|
| 1011 |
+
// Size of ring buffer where we store NCCL Traces for debugging.
|
| 1012 |
+
int ncclTraceBufferSize_;
|
| 1013 |
+
|
| 1014 |
+
// We gate the heartbeat monitor thread so that we can roll it out gradually.
|
| 1015 |
+
std::atomic<bool> monitorThreadEnabled_;
|
| 1016 |
+
|
| 1017 |
+
// We gate the cudaEventCache so that we can roll it out gradually.
|
| 1018 |
+
std::atomic<bool> cudaEventCacheEnabled_;
|
| 1019 |
+
|
| 1020 |
+
// Monitor thread which checks the heartbeat of Watchdog thread.
|
| 1021 |
+
// If the monitor thread finds there is no heartbeat, it will dump debug info
|
| 1022 |
+
// and then kill the watchdog thread to avoid hang.
|
| 1023 |
+
std::thread ncclHeartbeatMonitorThread_;
|
| 1024 |
+
|
| 1025 |
+
// Watchdog thread which looks for errors on the cached NCCL communicators.
|
| 1026 |
+
std::thread ncclCommWatchdogThread_;
|
| 1027 |
+
|
| 1028 |
+
std::thread onCompletionHookThread_;
|
| 1029 |
+
|
| 1030 |
+
// Whether or not we should terminate the watchdog and workCleanup threads.
|
| 1031 |
+
std::atomic<bool> terminateProcessGroup_;
|
| 1032 |
+
|
| 1033 |
+
// Whether or not we should terminate the heartbeat monitoring threads.
|
| 1034 |
+
std::atomic<bool> terminateHeartbeatMonitorThread_;
|
| 1035 |
+
|
| 1036 |
+
// Whether we are in the shutdown mode when we are trying to get debug info,
|
| 1037 |
+
// such as desync report.
|
| 1038 |
+
std::atomic<bool> collectiveDebugInfoMode_;
|
| 1039 |
+
|
| 1040 |
+
// Whether there are hooks pending to be fired
|
| 1041 |
+
std::atomic<bool> hasPendingHooks_;
|
| 1042 |
+
|
| 1043 |
+
// This is the signal from watchdog threads to indicate whether the monitor
|
| 1044 |
+
// thread should dump. Making it static so that it is accessiable from all the
|
| 1045 |
+
// PGs. With this flag, monitor thread would dump debug info under any one of
|
| 1046 |
+
// the three conditions:
|
| 1047 |
+
//
|
| 1048 |
+
// 1: watchdog thread of any PG detects a collective timeout.
|
| 1049 |
+
// 2: timeout signal is received from other ranks through tcpstore.
|
| 1050 |
+
// 3: current PG's watchdog heartbeat timeout occurs.
|
| 1051 |
+
//
|
| 1052 |
+
// Note that only the monitor thread from PG0 will dump the debug info for
|
| 1053 |
+
// case one and two so that the debug info is only dumped once.
|
| 1054 |
+
static std::atomic<bool> shouldDump_;
|
| 1055 |
+
|
| 1056 |
+
// Mutex to Guard workMetaList_
|
| 1057 |
+
std::mutex workMetaListMutex_;
|
| 1058 |
+
|
| 1059 |
+
// Mutex to Guard monitorWakeUpCV_
|
| 1060 |
+
std::mutex monitorMutex_;
|
| 1061 |
+
|
| 1062 |
+
bool writeDebugInfo_ = false;
|
| 1063 |
+
|
| 1064 |
+
// Condition Variable for watchdog thread sleep
|
| 1065 |
+
std::condition_variable workMetaListCV_;
|
| 1066 |
+
|
| 1067 |
+
// Condition Variable for monitor thread to wake up early
|
| 1068 |
+
std::condition_variable monitorWakeUpCV_;
|
| 1069 |
+
|
| 1070 |
+
// Vector to Store WorkNCCL pointers
|
| 1071 |
+
std::list<ProcessGroupNCCL::WorkNCCL> workMetaList_;
|
| 1072 |
+
|
| 1073 |
+
std::chrono::time_point<std::chrono::steady_clock> lastWorkListUpdateTime_;
|
| 1074 |
+
|
| 1075 |
+
// Mutex to Guard workMetaList_
|
| 1076 |
+
std::mutex completedWorkListMutex_;
|
| 1077 |
+
|
| 1078 |
+
// Condition Variable for watchdog thread sleep
|
| 1079 |
+
std::condition_variable completedWorkListCV_;
|
| 1080 |
+
|
| 1081 |
+
std::list<ProcessGroupNCCL::WorkNCCL> completedWorkList_;
|
| 1082 |
+
|
| 1083 |
+
// Add Work Pointer to workVector
|
| 1084 |
+
void workEnqueue(c10::intrusive_ptr<ProcessGroupNCCL::WorkNCCL>);
|
| 1085 |
+
|
| 1086 |
+
// The CUDA streams used by NCCL kernels
|
| 1087 |
+
std::unordered_map<std::string, at::cuda::CUDAStream> ncclStreams_;
|
| 1088 |
+
|
| 1089 |
+
// The CUDA events used to sync NCCL streams
|
| 1090 |
+
std::unordered_map<std::string, at::cuda::CUDAEvent> ncclEvents_;
|
| 1091 |
+
|
| 1092 |
+
// Device Indexes used for all collectives in this group
|
| 1093 |
+
std::set<int> usedDeviceIdxs_;
|
| 1094 |
+
|
| 1095 |
+
// Flag to denote if a coalescing groupStart/groupEnd block is active
|
| 1096 |
+
int coalescing_state_ = 0;
|
| 1097 |
+
|
| 1098 |
+
// Stores device indexes for all collectives run inside a coalescing block
|
| 1099 |
+
at::Device coalescedDevice_ = at::Device("cuda");
|
| 1100 |
+
|
| 1101 |
+
// Stores communicators for all collectives run inside a coalescing block
|
| 1102 |
+
std::shared_ptr<NCCLComm> coalescedComm_ = nullptr;
|
| 1103 |
+
|
| 1104 |
+
// map from the key: "group name + pg counter (ID)" to the
|
| 1105 |
+
// unique NCCL ID count. This needs to be group and pg specific
|
| 1106 |
+
//
|
| 1107 |
+
// For each process group, we need a uniform unique NCCL ID counter to ensure
|
| 1108 |
+
// that NCCL operation in this process group can be completed successfully.
|
| 1109 |
+
// Since each process group ID belongs to a group name, the key to this map
|
| 1110 |
+
// is a combination of group name and ProcessGroupNCCL ID.
|
| 1111 |
+
static std::unordered_map<std::string, ssize_t> pgUniqueNCCLIDCnt_;
|
| 1112 |
+
|
| 1113 |
+
// map from group name to the pg counter (ID) within that group
|
| 1114 |
+
//
|
| 1115 |
+
// For each group with the "group name" (which is the key), we need to
|
| 1116 |
+
// keep track of a unique process group ID when creating a new
|
| 1117 |
+
// ProcessGroupNCCL for this "group name". Therefore, the value of this
|
| 1118 |
+
// map keeps the unique ProcessGroupNCCL's ID for a specific group with
|
| 1119 |
+
// the "group name". The reason we need a per-group process group ID counter
|
| 1120 |
+
// is that different group can have different ranks and we need ensure that
|
| 1121 |
+
// each group has its own uniform process group ID for all its ranks.
|
| 1122 |
+
static std::unordered_map<std::string, ssize_t> processGroupCounterMap_;
|
| 1123 |
+
|
| 1124 |
+
// Whether or not wait() and synchronize() are blocking operations that wait
|
| 1125 |
+
// for the operation to complete.
|
| 1126 |
+
bool blockingWait_ = false;
|
| 1127 |
+
|
| 1128 |
+
// Whether or not to hook the cache allocator to register all allocated
|
| 1129 |
+
// tensors
|
| 1130 |
+
bool useTensorRegisterAllocatorHook_ = false;
|
| 1131 |
+
|
| 1132 |
+
// Whether or not the workCleanupThread is used to perform async error
|
| 1133 |
+
// handling.
|
| 1134 |
+
ErrorHandlingMode asyncErrorHandling_ = NoHandling;
|
| 1135 |
+
|
| 1136 |
+
// Whether or not to enable timeout root cause analysis.
|
| 1137 |
+
bool desyncDebug_;
|
| 1138 |
+
|
| 1139 |
+
// Whether or not to dump debug info on exception including both watchdog
|
| 1140 |
+
// timeout and nccl errors.
|
| 1141 |
+
bool dumpOnTimeoutOrEx_;
|
| 1142 |
+
|
| 1143 |
+
// Whether or not to enable nan check for input tensors to collectives.
|
| 1144 |
+
bool enableNanCheck_;
|
| 1145 |
+
|
| 1146 |
+
// Whether or not to print C++ stack traces to logs on unclean shutdown.
|
| 1147 |
+
bool logCppStackOnUncleanShutdown_;
|
| 1148 |
+
|
| 1149 |
+
// Whether or not to create start CUDAEvent and enable timing for start
|
| 1150 |
+
// and end events. Note that enableTiming_ is always true if desyncDebug_
|
| 1151 |
+
// is set to true.
|
| 1152 |
+
std::atomic<bool> enableTiming_;
|
| 1153 |
+
|
| 1154 |
+
// Flag to enable the print of hash value of input/output of collectives for
|
| 1155 |
+
// verification.
|
| 1156 |
+
std::atomic<bool> enableCollecticeHashDebug_;
|
| 1157 |
+
|
| 1158 |
+
// Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set
|
| 1159 |
+
bool avoidRecordStreams_ = false;
|
| 1160 |
+
|
| 1161 |
+
// Whether the NCCL watchdog should rethrow CUDA errors.
|
| 1162 |
+
bool rethrowCUDAErrors_ = false;
|
| 1163 |
+
|
| 1164 |
+
// Set of communicators that this process group has aborted and their
|
| 1165 |
+
// ncclUniqueId has been written to the store. We don't need a lock
|
| 1166 |
+
// for this map since only the watchdog thread accesses this set. The
|
| 1167 |
+
// set contains the string representation of ncclUniqueId.
|
| 1168 |
+
std::unordered_set<std::string> abortedComms_;
|
| 1169 |
+
|
| 1170 |
+
// The number of active ncclGroupStart() calls. This counter will be increased
|
| 1171 |
+
// by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd()
|
| 1172 |
+
// is called.
|
| 1173 |
+
static thread_local uint64_t ncclActiveGroupCounter_;
|
| 1174 |
+
|
| 1175 |
+
// Counting for the sequential number of NCCL collective call.
|
| 1176 |
+
// (specifically, how many actual kernels we launched, which differs from
|
| 1177 |
+
// op_id_ when coalescing is enabled)
|
| 1178 |
+
uint64_t seqCollective_{0};
|
| 1179 |
+
|
| 1180 |
+
// Counting for the sequential number of NCCL P2P calls.
|
| 1181 |
+
uint64_t seqP2P_{0};
|
| 1182 |
+
|
| 1183 |
+
// Incrementing counter for logical operations (collective or p2p) issued on
|
| 1184 |
+
// the ProcessGroup
|
| 1185 |
+
uint64_t op_id_{0};
|
| 1186 |
+
|
| 1187 |
+
std::exception_ptr watchDogException_ = nullptr;
|
| 1188 |
+
|
| 1189 |
+
// The number of ProcessGroupNCCL created on the current rank.
|
| 1190 |
+
size_t local_id_;
|
| 1191 |
+
|
| 1192 |
+
std::string logPrefix_;
|
| 1193 |
+
|
| 1194 |
+
c10::intrusive_ptr<intra_node_comm::IntraNodeComm> intraNodeComm_;
|
| 1195 |
+
|
| 1196 |
+
// Number of devices on this node.
|
| 1197 |
+
int localDeviceCount_{0};
|
| 1198 |
+
|
| 1199 |
+
std::shared_ptr<ProcessGroupStatus> pgStatus_ =
|
| 1200 |
+
std::make_shared<ProcessGroupStatus>();
|
| 1201 |
+
};
|
| 1202 |
+
|
| 1203 |
+
// Dumps the NCCL comm traces and additional information about the Process
|
| 1204 |
+
// Group.
|
| 1205 |
+
TORCH_API std::string dump_nccl_trace(
|
| 1206 |
+
bool includeCollectives,
|
| 1207 |
+
bool includeStackTraces,
|
| 1208 |
+
bool onlyActive);
|
| 1209 |
+
|
| 1210 |
+
// Dumps the NCCL comm traces and additional information about the Process
|
| 1211 |
+
// Group in JSON formatted string.
|
| 1212 |
+
// We don't include stack traces in JSON format as it is far too much data.
|
| 1213 |
+
TORCH_API std::string dump_nccl_trace_json(
|
| 1214 |
+
bool includeCollectives,
|
| 1215 |
+
bool onlyActive);
|
| 1216 |
+
|
| 1217 |
+
// Gets a mutable reference to a global optional function.Heartbeat Monitor
|
| 1218 |
+
// will use this function to dump traces, if available. Inside fbcode, we
|
| 1219 |
+
// store a function here that uses an internal tool for process tracing
|
| 1220 |
+
TORCH_API std::optional<
|
| 1221 |
+
std::function<void(std::function<void(const std::string&)>)>>&
|
| 1222 |
+
get_cpp_trace_dumper();
|
| 1223 |
+
|
| 1224 |
+
// Similar to get_cpp_trace_dumper, this stores a function defined in
|
| 1225 |
+
// torch-python layer that lets us check whether the GIL can be acquired,
|
| 1226 |
+
// helpful for instrumenting in cases where a hang was observed.
|
| 1227 |
+
typedef bool (*gil_checker_t)();
|
| 1228 |
+
|
| 1229 |
+
TORCH_API gil_checker_t& get_gil_checker();
|
| 1230 |
+
} // namespace c10d
|
| 1231 |
+
|
| 1232 |
+
#endif // USE_C10D_NCCL
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 4 |
+
#include <torch/csrc/jit/python/pybind_utils.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
// PyProcessGroup is a pybind11 trampoline class to allow a Python
// class to inherit from torch.distributed.ProcessGroup
//
// Every collective below follows the same pattern: PYBIND11_OVERRIDE
// dispatches to a Python method of the same name when the Python subclass
// defines one, and otherwise falls back to the ProcessGroup base
// implementation.
class PyProcessGroup : public ProcessGroup {
 public:
  // PyWork is a pybind11 trampoline class to allow a Python
  // class to inherit from torch.distributed.Work
  class TORCH_PYTHON_API PyWork : public Work {
   public:
    PyWork() = default;

    // Dispatches to a Python `wait` override when one exists, else Work::wait.
    bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
      PYBIND11_OVERRIDE(
          bool, /* Return type */
          Work, /* Parent class */
          wait, /* Name of function in C++ */
          timeout);
    }

    // Returns the future backing this work. Looks up a Python `get_future`
    // override by hand (see comment below) and unwraps its
    // PythonFutureWrapper; falls back to Work::getFuture() otherwise.
    c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
      // We cannot use PYBIND11_OVERRIDE because:
      // 1. We have to >MANUALLY< unwrap the PyFutureWrapper and
      // 2. The python name is get_future
      pybind11::gil_scoped_acquire gil;
      auto override =
          pybind11::get_override(static_cast<const Work*>(this), "get_future");

      if (override) {
        py::object o = override();
        auto futWrapper =
            o.cast<std::shared_ptr<torch::jit::PythonFutureWrapper>>();
        return futWrapper->fut;
      }

      return Work::getFuture();
    }

    // Take a reference of the corresponding py::object.
    // With functional collectives, ownership of work objects is generally
    // transferred to C++. For pure C++ work objects, it is sufficient to
    // transfer the ownership of work object. For user-defined work objects in
    // Python, it is necessary to keep the corresponding py::object alive in
    // addition to ensure that the user-defined methods can be executed.
    void ref_py_object() {
      py_obj_ = py::cast(this);
    }

   private:
    // Holds the Python wrapper for this work object (set by ref_py_object());
    // NOTE(review): storing py::cast(this) makes the object self-referential,
    // keeping the Python side alive for the lifetime of this PyWork.
    py::object py_obj_;
  };

  using ProcessGroup::ProcessGroup;

  // Python subclasses MUST implement `getBackendName`;
  // PYBIND11_OVERRIDE_PURE raises when no override exists.
  const std::string getBackendName() const override {
    PYBIND11_OVERRIDE_PURE(
        std::string, /* Return type */
        ProcessGroup, /* Parent class */
        getBackendName, /* Name of function in C++ */
    );
  }

  c10::intrusive_ptr<Work> allgather(
      std::vector<std::vector<at::Tensor>>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allgather, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const AllgatherOptions& opts = AllgatherOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allgather_into_tensor_coalesced, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> allreduce(
      std::vector<at::Tensor>& tensors,
      const AllreduceOptions& opts = AllreduceOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allreduce, /* Name of function in C++ */
        tensors,
        opts);
  }

  c10::intrusive_ptr<Work> allreduce_coalesced(
      std::vector<at::Tensor>& tensors,
      const AllreduceCoalescedOptions& opts =
          AllreduceCoalescedOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        allreduce_coalesced, /* Name of function in C++ */
        tensors,
        opts);
  }

  c10::intrusive_ptr<Work> alltoall_base(
      at::Tensor& outputBuffer,
      at::Tensor& inputBuffer,
      std::vector<int64_t>& outputSplitSizes,
      std::vector<int64_t>& inputSplitSizes,
      const AllToAllOptions& opts = AllToAllOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        alltoall_base, /* Name of function in C++ */
        outputBuffer,
        inputBuffer,
        outputSplitSizes,
        inputSplitSizes,
        opts);
  }

  c10::intrusive_ptr<Work> barrier(
      const BarrierOptions& opts = BarrierOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        barrier, /* Name of function in C++ */
        opts);
  }

  c10::intrusive_ptr<Work> broadcast(
      std::vector<at::Tensor>& tensors,
      const BroadcastOptions& opts = BroadcastOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        broadcast, /* Name of function in C++ */
        tensors,
        opts);
  }

  c10::intrusive_ptr<Work> reduce_scatter(
      std::vector<at::Tensor>& outputTensors,
      std::vector<std::vector<at::Tensor>>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        reduce_scatter, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
      std::vector<at::Tensor>& outputTensors,
      std::vector<at::Tensor>& inputTensors,
      const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        reduce_scatter_tensor_coalesced, /* Name of function in C++ */
        outputTensors,
        inputTensors,
        opts);
  }

  c10::intrusive_ptr<Work> send(
      std::vector<at::Tensor>& tensors,
      int dstRank,
      int tag) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        send, /* Name of function in C++ */
        tensors,
        dstRank,
        tag);
  }

  c10::intrusive_ptr<Work> recv(
      std::vector<at::Tensor>& tensors,
      int srcRank,
      int tag) override {
    PYBIND11_OVERRIDE(
        c10::intrusive_ptr<Work>, /* Return type */
        ProcessGroup, /* Parent class */
        recv, /* Name of function in C++ */
        tensors,
        srcRank,
        tag);
  }
};
|
| 207 |
+
|
| 208 |
+
// Callable wrapper around a Python on-completion hook. Owns the hook's
// py::object and guarantees the GIL is held for both invocation and
// destruction of the underlying PyObject.
class TORCH_PYTHON_API PythonOnCompletionHook {
 public:
  // Wraps a py::object hook and acquires Python GIL in dtor before
  // destructing the hook object.
  PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {}

  ~PythonOnCompletionHook() {
    py::gil_scoped_acquire ag;
    hook_.dec_ref();
    // Explicitly set hook_ to nullptr to prevent py::object's dtor
    // to decref on the PyObject again.
    // See Note [Destructing py::object] in python_ivalue.h
    hook_.ptr() = nullptr;
  }

  // Invokes the Python hook with `workInfo` under the GIL. Any exception the
  // hook raises is converted to a C++ exception_ptr and rethrown only after
  // the GIL has been released (see comment below).
  void operator()(const std::shared_ptr<WorkInfo>& workInfo) const {
    std::exception_ptr eptr;
    {
      py::gil_scoped_acquire acquire;
      try {
        hook_(workInfo);
      } catch (py::error_already_set& e) {
        // py::error_already_set requires GIL to destruct, take
        // special care.
        eptr = std::make_exception_ptr(std::runtime_error(e.what()));
        e.restore();
        PyErr_Clear();
      } catch (std::exception& e) {
        eptr = std::current_exception();
      }
    }
    // No more Python-related stuff at this point, i.e., this
    // exception can be captured and handled by PG backend.
    if (eptr)
      std::rethrow_exception(eptr);
  }

 private:
  py::object hook_;
};
|
| 248 |
+
|
| 249 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <chrono>
|
| 4 |
+
#include <cstdint>
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
#include <torch/custom_class.h>
|
| 10 |
+
|
| 11 |
+
namespace c10d {
|
| 12 |
+
|
| 13 |
+
// callback function will be given arguments (std::optional<string> oldValue,
|
| 14 |
+
// std::optional<string> newValue)
|
| 15 |
+
using WatchKeyCallback =
|
| 16 |
+
std::function<void(std::optional<std::string>, std::optional<std::string>)>;
|
| 17 |
+
|
| 18 |
+
// Abstract key/value store used by process groups for rendezvous and
// coordination. Concrete backends (e.g. file-, hash-, or TCP-based stores)
// implement the pure-virtual byte-vector API; the string overloads are
// convenience wrappers defined elsewhere.
class TORCH_API Store : public torch::CustomClassHolder {
 public:
  // Default wait/get timeout applied when none is given explicitly.
  static constexpr std::chrono::milliseconds kDefaultTimeout =
      std::chrono::seconds(300);
  // Sentinel meaning "no timeout" (zero milliseconds).
  static constexpr std::chrono::milliseconds kNoTimeout =
      std::chrono::milliseconds::zero();

  Store() : timeout_(kDefaultTimeout) {}

  explicit Store(const std::chrono::milliseconds& timeout)
      : timeout_(timeout) {}

  Store(const Store&) = default;
  Store(Store&&) noexcept = default;

  ~Store() override = default;

  // String convenience overload of the byte-vector set() below.
  void set(const std::string& key, const std::string& value);

  // Stores `value` under `key`, overwriting any previous value.
  virtual void set(
      const std::string& key,
      const std::vector<uint8_t>& value) = 0;

  // String convenience overload of the byte-vector compareSet() below.
  std::string compareSet(
      const std::string& key,
      const std::string& currentValue,
      const std::string& newValue);

  // Optional compare-and-swap; backends that do not support it inherit this
  // asserting default.
  virtual std::vector<uint8_t> compareSet(
      const std::string& key,
      const std::vector<uint8_t>& currentValue,
      const std::vector<uint8_t>& newValue) {
    TORCH_INTERNAL_ASSERT(false, "Not implemented.");
  }

  // String convenience wrapper over get().
  std::string get_to_str(const std::string& key);

  // Retrieves the value stored under `key`.
  virtual std::vector<uint8_t> get(const std::string& key) = 0;

  // Atomically adds `value` to the integer stored at `key`; returns the new
  // value.
  virtual int64_t add(const std::string& key, int64_t value) = 0;

  // Removes `key`; returns whether the deletion succeeded.
  virtual bool deleteKey(const std::string& key) = 0;

  // Returns whether all `keys` are present.
  virtual bool check(const std::vector<std::string>& keys) = 0;

  virtual int64_t getNumKeys() = 0;

  // Blocks until all `keys` are present (default timeout).
  virtual void wait(const std::vector<std::string>& keys) = 0;

  // Blocks until all `keys` are present or `timeout` elapses.
  virtual void wait(
      const std::vector<std::string>& keys,
      const std::chrono::milliseconds& timeout) = 0;

  virtual const std::chrono::milliseconds& getTimeout() const noexcept;

  virtual void setTimeout(const std::chrono::milliseconds& timeout);

  // watchKey() is deprecated and no longer supported.
  virtual void watchKey(
      const std::string& /* unused */,
      WatchKeyCallback /* unused */) {
    TORCH_CHECK(false, "watchKey is deprecated, no implementation support it.");
  }

  // Extended API (see hasExtendedApi()): append bytes to an existing value.
  virtual void append(
      const std::string& key,
      const std::vector<uint8_t>& value);

  // Extended API: batched get of several keys.
  virtual std::vector<std::vector<uint8_t>> multiGet(
      const std::vector<std::string>& keys);

  // Extended API: batched set; `keys` and `values` are parallel lists.
  virtual void multiSet(
      const std::vector<std::string>& keys,
      const std::vector<std::vector<uint8_t>>& values);

  // Returns true if this store support append, multiGet and multiSet
  virtual bool hasExtendedApi() const;

 protected:
  // Current wait/get timeout; adjustable via setTimeout().
  std::chrono::milliseconds timeout_;
};
|
| 99 |
+
|
| 100 |
+
/*
|
| 101 |
+
StoreTimeoutGuard is a RAII guard that will set the store timeout and restore it
|
| 102 |
+
when it returns.
|
| 103 |
+
*/
|
| 104 |
+
class StoreTimeoutGuard {
|
| 105 |
+
public:
|
| 106 |
+
explicit StoreTimeoutGuard(
|
| 107 |
+
Store& store,
|
| 108 |
+
const std::chrono::milliseconds& timeout)
|
| 109 |
+
: store_(store), oldTimeout_(store.getTimeout()) {
|
| 110 |
+
store.setTimeout(timeout);
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
~StoreTimeoutGuard() {
|
| 114 |
+
store_.setTimeout(oldTimeout_);
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
/* Disabling copy and move semantics */
|
| 118 |
+
StoreTimeoutGuard(const StoreTimeoutGuard&) = delete;
|
| 119 |
+
StoreTimeoutGuard& operator=(const StoreTimeoutGuard&) = delete;
|
| 120 |
+
StoreTimeoutGuard(StoreTimeoutGuard&&) = delete;
|
| 121 |
+
StoreTimeoutGuard& operator=(StoreTimeoutGuard&&) = delete;
|
| 122 |
+
|
| 123 |
+
private:
|
| 124 |
+
Store& store_;
|
| 125 |
+
std::chrono::milliseconds oldTimeout_{};
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_UCC
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/c10d/UCCUtils.hpp>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
// Records one collective operation into `_comms_tracer` (a CommTraceLogger),
// but only when comms logging is enabled via
// torch_ucc_config.enable_comms_logger. Wrapped in do { } while (0) so the
// macro expands to a single statement.
#define RECORD_COMMS_TRACE( \
    _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \
  do { \
    if (torch_ucc_config.enable_comms_logger) { \
      _comms_tracer->recordComms( \
          opTypeToString(_opType), \
          (uintptr_t)_work.get(), \
          _rank, \
          _comm_size, \
          _inTensors, \
          _outTensors); \
    } \
  } while (0)
|
| 22 |
+
|
| 23 |
+
// interfaces to collect communication traces
// Accumulates one textual trace entry per recorded collective; optional
// per-collective details (root rank, alltoallv splits) are staged via
// recordOptionalInfo() before recordComms() consumes them.
class TORCH_API CommTraceLogger : public torch::CustomClassHolder {
 private:
  std::vector<std::string> comms_trace_; // accumulated trace entries
  std::vector<std::string> curBlocks_; /* unused */
  std::vector<int64_t> curOutSplitSizes_; // staged output splits (alltoallv)
  std::vector<int64_t> curInSplitSizes_; // staged input splits (alltoallv)
  int curRoot_ = -1; // staged root rank; -1 means none
  unsigned long seqnum = 0; // running sequence number for recorded entries

 public:
  void setCurBlock(const std::string& name); /* unused */
  void popBlock(); /* unused */
  // record root info if applicable, e.g., broadcast, gather, scatter
  void recordOptionalInfo(int root = -1);
  // record input/output splits of Alltoallv
  void recordOptionalInfo(
      const std::vector<int64_t>& outputSplitSizes = {},
      const std::vector<int64_t>& inputSplitSizes = {});
  // record essential comms information
  void recordComms(
      const std::string& collName,
      const uintptr_t workReq = 0,
      const int rank = -1,
      const int world_size = -1,
      const std::vector<at::Tensor>& inputTensors = {},
      const std::vector<at::Tensor>& outputTensor = {});
  // return collected comms traces
  std::vector<std::string>& getCommsTrace() {
    return comms_trace_;
  }
};
|
| 55 |
+
|
| 56 |
+
} // namespace c10d
|
| 57 |
+
|
| 58 |
+
#endif // USE_C10D_UCC
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d::tcputil {
|
| 6 |
+
|
| 7 |
+
#define CONNECT_SOCKET_OFFSET 2
|
| 8 |
+
|
| 9 |
+
// Thin forwarder to the global POSIX poll(2), giving callers a
// c10d::tcputil::poll name (a Windows counterpart presumably lives in the
// platform-specific sibling header — not visible here).
inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) {
  return ::poll(fds, nfds, timeout);
}
|
| 12 |
+
|
| 13 |
+
// Appends a pollfd entry watching `events` on `socket` to `fds`; the
// `revents` field starts out zeroed.
inline void addPollfd(
    std::vector<struct pollfd>& fds,
    int socket,
    short events) {
  struct pollfd entry {};
  entry.fd = socket;
  entry.events = events;
  fds.push_back(entry);
}
|
| 19 |
+
|
| 20 |
+
// Builds and returns a pollfd entry watching `events` on `socket`; the
// `revents` field is zero-initialized.
inline struct ::pollfd getPollfd(int socket, short events) {
  struct ::pollfd entry {};
  entry.fd = socket;
  entry.events = events;
  return entry;
}
|
| 24 |
+
|
| 25 |
+
} // namespace c10d::tcputil
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp
ADDED
|
@@ -0,0 +1,729 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <c10/util/Exception.h>
|
| 5 |
+
#include <c10/util/accumulate.h>
|
| 6 |
+
#include <c10/util/irange.h>
|
| 7 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 8 |
+
|
| 9 |
+
#ifdef _WIN32
|
| 10 |
+
#include <winsock2.h>
|
| 11 |
+
#include <ws2tcpip.h>
|
| 12 |
+
typedef SSIZE_T ssize_t;
|
| 13 |
+
#pragma comment(lib, "Ws2_32.lib")
|
| 14 |
+
#else
|
| 15 |
+
#include <fcntl.h>
|
| 16 |
+
#include <netdb.h>
|
| 17 |
+
#include <sys/poll.h>
|
| 18 |
+
#include <sys/socket.h>
|
| 19 |
+
#include <unistd.h>
|
| 20 |
+
#endif
|
| 21 |
+
|
| 22 |
+
#include <sys/types.h>
|
| 23 |
+
|
| 24 |
+
#include <cstdint>
|
| 25 |
+
#include <cstdlib>
|
| 26 |
+
#include <functional>
|
| 27 |
+
#include <string>
|
| 28 |
+
#include <vector>
|
| 29 |
+
|
| 30 |
+
namespace c10d {
|
| 31 |
+
|
| 32 |
+
TORCH_API size_t getTensorsNumel(const std::vector<at::Tensor>& tensors);
|
| 33 |
+
|
| 34 |
+
// Retrieve tensor shapes from a given tensor.
|
| 35 |
+
TORCH_API std::vector<at::Tensor> getTensorShapes(
|
| 36 |
+
const std::vector<at::Tensor>& tensors);
|
| 37 |
+
|
| 38 |
+
// Use -2 to represent unset state of env vars
|
| 39 |
+
#define C10D_ENV_NOT_SET -2
|
| 40 |
+
|
| 41 |
+
#define WARN_ENV_VAR_ONCE(deprecated_env, new_env) \
|
| 42 |
+
TORCH_WARN_ONCE( \
|
| 43 |
+
"Environment variable " + deprecated_env + " is deprecated; use " + \
|
| 44 |
+
new_env + " instead");
|
| 45 |
+
|
| 46 |
+
// Turns at::IntArrayRef into "(1, 2, 3, 4)".
|
| 47 |
+
inline std::string toString(at::IntArrayRef l) {
|
| 48 |
+
std::stringstream ss;
|
| 49 |
+
ss << "(";
|
| 50 |
+
for (const auto i : c10::irange(l.size())) {
|
| 51 |
+
if (i > 0) {
|
| 52 |
+
ss << ", ";
|
| 53 |
+
}
|
| 54 |
+
ss << l[i];
|
| 55 |
+
}
|
| 56 |
+
ss << ")";
|
| 57 |
+
return ss.str();
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
// Renders a c10::Layout via its stream operator (e.g. "Strided").
inline std::string toString(const c10::Layout& layout) {
  std::stringstream ss;
  ss << layout;
  return ss.str();
}
|
| 65 |
+
|
| 66 |
+
// Throws std::invalid_argument if any tensor's dtype/backend (options)
// differs from the reference `type`.
inline void assertSameType(
    const at::DeprecatedTypeProperties& type,
    const std::vector<at::Tensor>& tensors) {
  for (const auto i : c10::irange(tensors.size())) {
    if (!tensors[i].options().type_equal(type.options())) {
      const std::string expected = type.toString();
      const std::string actual = tensors[i].toString();
      throw std::invalid_argument(
          // NOLINTNEXTLINE(performance-inefficient-string-concatenation)
          "mixed types (" + expected + " and " + actual + ")");
    }
  }
}
|
| 79 |
+
|
| 80 |
+
// Splits `string` on `separator` with std::getline semantics: adjacent
// separators yield empty pieces, a trailing separator yields no trailing
// empty piece, and an empty input yields an empty result.
inline std::vector<std::string> split(
    char separator,
    const std::string& string) {
  std::vector<std::string> result;
  std::size_t begin = 0;
  for (std::size_t end = string.find(separator); end != std::string::npos;
       end = string.find(separator, begin)) {
    result.emplace_back(string.substr(begin, end - begin));
    begin = end + 1;
  }
  if (begin < string.size()) {
    result.emplace_back(string.substr(begin));
  }
  return result;
}
|
| 91 |
+
|
| 92 |
+
// Reads a string config value from the environment. `env` lists alternative
// variable names, most-preferred first; `def` is returned when none is set.
// The list is scanned back-to-front so the earliest (preferred) name that is
// set wins; using any non-primary name warns once, pointing at env[0].
inline std::string getCvarString(
    const std::vector<std::string>& env,
    const char* def) {
  const char* ret = def;

  if (env.empty()) {
    TORCH_CHECK(false, "No environment variables passed");
    return ret; // unreachable: TORCH_CHECK(false, ...) throws
  }

  /* parse environment variable in reverse order, so the early
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
  for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
    const char* val = std::getenv(env[i].c_str());
    if (val == nullptr) {
      continue;
    } else if (i) {
      // A deprecated (non-primary) name is set; warn once.
      WARN_ENV_VAR_ONCE(env[i], env[0]);
    }

    ret = val;
  }

  return ret;
}
|
| 118 |
+
|
| 119 |
+
// Reads an integer config value from the environment. `env` lists alternative
// variable names, most-preferred first; `def` is returned when none is set.
// A value std::stoi cannot parse fails hard via TORCH_CHECK.
inline int getCvarInt(const std::vector<std::string>& env, int def) {
  int ret = def;

  if (env.empty()) {
    TORCH_CHECK(false, "No environment variables passed");
    return ret; // unreachable: TORCH_CHECK(false, ...) throws
  }

  /* parse environment variable in reverse order, so the early
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
  for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
    char* val = std::getenv(env[i].c_str());
    if (val == nullptr) {
      continue;
    } else if (i) {
      // A deprecated (non-primary) name is set; warn once.
      WARN_ENV_VAR_ONCE(env[i], env[0]);
    }

    try {
      ret = std::stoi(val);
    } catch (std::exception&) {
      TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
    }
  }

  return ret;
}
|
| 147 |
+
|
| 148 |
+
// Reads a boolean config value from the environment. `env` lists alternative
// variable names, most-preferred first; `def` is returned when none is set.
// Accepted (case-insensitive) values: y/yes/1/t/true and n/no/0/f/false;
// anything else fails hard via TORCH_CHECK.
inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
  bool ret = def;

  if (env.empty()) {
    TORCH_CHECK(false, "No environment variables passed");
    return ret; // unreachable: TORCH_CHECK(false, ...) throws
  }

  /* parse environment variable in reverse order, so the early
   * versions of a variable get higher priority than the latter
   * versions of the same variable */
  for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
    char* val_ = std::getenv(env[i].c_str());
    if (val_ == nullptr) {
      continue;
    } else if (i) {
      // A deprecated (non-primary) name is set; warn once.
      WARN_ENV_VAR_ONCE(env[i], env[0]);
    }

    // Compare case-insensitively.
    std::string val = std::string(val_);
    for (auto& x : val) {
      // NOLINTNEXTLINE(*-narrowing-conversions)
      x = std::tolower(x);
    }

    if (val == "y" || val == "yes" || val == "1" || val == "t" ||
        val == "true") {
      ret = true;
    } else if (
        val == "n" || val == "no" || val == "0" || val == "f" ||
        val == "false") {
      ret = false;
    } else {
      TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]);
      return ret; // unreachable: TORCH_CHECK(false, ...) throws
    }
  }

  return ret;
}
|
| 188 |
+
|
| 189 |
+
// Throws std::invalid_argument if any tensor's shape differs from the
// reference `sizes`.
inline void assertSameSizes(
    const at::IntArrayRef& sizes,
    const std::vector<at::Tensor>& tensors) {
  for (const auto i : c10::irange(tensors.size())) {
    if (!tensors[i].sizes().equals(sizes)) {
      const auto expected = toString(sizes);
      const auto actual = toString(tensors[i].sizes());
      throw std::invalid_argument(
          // NOLINTNEXTLINE(performance-inefficient-string-concatenation)
          "mixed sizes (" + expected + " and " + actual + ")");
    }
  }
}
|
| 202 |
+
|
| 203 |
+
// Validates that `tensors` is non-empty and that every tensor matches
// tensors[0] in both dtype/backend (options) and shape; throws
// std::invalid_argument otherwise.
inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
  // Ensure we have at least one tensor
  if (tensors.empty()) {
    throw std::invalid_argument("argument is empty");
  }

  // Ensure all tensors have identical type and shape
  auto options = tensors[0].options();
  auto sizes = tensors[0].sizes();
  for (const auto i : c10::irange(1, tensors.size())) {
    if (!tensors[i].options().type_equal(options)) {
      const auto expected = toString(options);
      const auto actual = toString(tensors[i].options());
      throw std::invalid_argument(
          // NOLINTNEXTLINE(performance-inefficient-string-concatenation)
          "argument contains mixed types (" + expected + " and " + actual +
          ")");
    }
    if (!tensors[i].sizes().equals(sizes)) {
      const auto expected = toString(sizes);
      const auto actual = toString(tensors[i].sizes());
      // Fix: this branch detects a SIZE mismatch (and formats sizes), but the
      // original message said "mixed types" — a copy-paste error from the
      // branch above.
      throw std::invalid_argument(
          // NOLINTNEXTLINE(performance-inefficient-string-concatenation)
          "argument contains mixed sizes (" + expected + " and " + actual +
          ")");
    }
  }
}
|
| 231 |
+
|
| 232 |
+
// Invokes the error callback `fn` with a descriptive message if
// tensors[index]'s dtype/backend differs from the reference `type`.
inline void assertTypeMatch(
    const std::function<void(const std::string&)>& fn,
    const at::DeprecatedTypeProperties& type,
    const at::ArrayRef<at::Tensor> tensors,
    size_t index) {
  if (!tensors[index].options().type_equal(type.options())) {
    fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
       type.toString() + ", got " + tensors[index].toString() + ")");
  }
}

// Overload taking the reference type as at::TensorOptions.
inline void assertTypeMatch(
    const std::function<void(const std::string&)>& fn,
    const at::TensorOptions& options,
    const at::ArrayRef<at::Tensor> tensors,
    size_t index) {
  if (!tensors[index].options().type_equal(options)) {
    fn("invalid tensor type at index " + std::to_string(index) + " (expected " +
       toString(options) + ", got " + toString(tensors[index].options()) + ")");
  }
}
|
| 253 |
+
|
| 254 |
+
// Invokes the error callback `fn` with a descriptive message if
// tensors[index]'s shape differs from the reference `sizes`.
inline void assertSizesMatch(
    const std::function<void(const std::string&)>& fn,
    const at::IntArrayRef& sizes,
    const at::ArrayRef<at::Tensor> tensors,
    size_t index) {
  if (tensors[index].sizes() != sizes) {
    fn("invalid tensor size at index " + std::to_string(index) + " (expected " +
       toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")");
  }
}
|
| 264 |
+
|
| 265 |
+
// Invokes the error callback `fn` with a descriptive message if
// tensors[index]'s layout differs from `expected`.
inline void assertLayoutMatch(
    const std::function<void(const std::string&)>& fn,
    const c10::Layout& expected,
    const at::ArrayRef<at::Tensor> tensors,
    size_t index) {
  const auto& actual = tensors[index].layout();
  if (actual != expected) {
    fn("invalid tensor layout at index " + std::to_string(index) +
       " (expected " + toString(expected) + ", got " + toString(actual) + ")");
  }
}

// Overload checking that every tensor shares tensors[0]'s layout.
// NOTE(review): assumes `tensors` is non-empty (tensors[0] is read
// unconditionally).
inline void assertLayoutMatch(
    const std::function<void(const std::string&)>& fn,
    const at::ArrayRef<at::Tensor> tensors) {
  const auto& layout = tensors[0].layout();
  for (const auto i : c10::irange(1, tensors.size())) {
    assertLayoutMatch(fn, layout, tensors, i);
  }
}
|
| 285 |
+
|
| 286 |
+
inline void assertNonEmpty(
|
| 287 |
+
const std::function<void(const std::string&)>& fn,
|
| 288 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 289 |
+
if (tensors.empty()) {
|
| 290 |
+
fn("requires non-empty tensor list");
|
| 291 |
+
}
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
inline void assertSingleElement(
|
| 295 |
+
const std::function<void(const std::string&)>& fn,
|
| 296 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 297 |
+
if (tensors.size() != 1) {
|
| 298 |
+
fn("requires a single-element tensor list");
|
| 299 |
+
}
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
inline void assertSingleElementInput(
|
| 303 |
+
const std::function<void(const std::string&)>& fn,
|
| 304 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 305 |
+
if (tensors.size() != 1) {
|
| 306 |
+
fn("requires a single-element input tensor list");
|
| 307 |
+
}
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
inline void assertSingleElementOutput(
|
| 311 |
+
const std::function<void(const std::string&)>& fn,
|
| 312 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 313 |
+
if (tensors.size() != 1) {
|
| 314 |
+
fn("requires a single-element output tensor list");
|
| 315 |
+
}
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
inline void assertRootRank(
|
| 319 |
+
const std::function<void(const std::string&)>& fn,
|
| 320 |
+
int64_t rank,
|
| 321 |
+
int64_t size) {
|
| 322 |
+
if (rank < 0 || rank >= size) {
|
| 323 |
+
fn("invalid root rank: " + std::to_string(rank));
|
| 324 |
+
}
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
inline void assertRootTensor(
|
| 328 |
+
const std::function<void(const std::string&)>& fn,
|
| 329 |
+
int64_t rank,
|
| 330 |
+
int64_t size) {
|
| 331 |
+
if (rank < 0 || rank >= size) {
|
| 332 |
+
fn("invalid root tensor: " + std::to_string(rank));
|
| 333 |
+
}
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
inline void assertDense(
|
| 337 |
+
const std::function<void(const std::string&)>& fn,
|
| 338 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 339 |
+
const auto& layout = tensors[0].layout();
|
| 340 |
+
if (layout != at::kStrided) {
|
| 341 |
+
fn("only supports dense tensors");
|
| 342 |
+
}
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
inline void assertCPU(
|
| 346 |
+
const std::function<void(const std::string&)>& fn,
|
| 347 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 348 |
+
const auto& device = tensors[0].device();
|
| 349 |
+
if (device.type() != at::kCPU) {
|
| 350 |
+
fn("only supports CPU tensors");
|
| 351 |
+
}
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
inline void assertSameDevice(
|
| 355 |
+
const std::function<void(const std::string&)>& fn,
|
| 356 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 357 |
+
if (tensors.size() < 2) {
|
| 358 |
+
return;
|
| 359 |
+
}
|
| 360 |
+
const auto& device = tensors[0].device();
|
| 361 |
+
for (const auto i : c10::irange(1, tensors.size())) {
|
| 362 |
+
if (tensors[i].device() != device) {
|
| 363 |
+
fn("tensors should be on the same device");
|
| 364 |
+
}
|
| 365 |
+
}
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
inline void assertTypeAndSizesMatch(
|
| 369 |
+
const std::function<void(const std::string&)>& fn,
|
| 370 |
+
const at::ArrayRef<at::Tensor> tensors,
|
| 371 |
+
const at::DeprecatedTypeProperties& type,
|
| 372 |
+
const at::IntArrayRef& sizes) {
|
| 373 |
+
for (const auto i : c10::irange(tensors.size())) {
|
| 374 |
+
assertTypeMatch(fn, type, tensors, i);
|
| 375 |
+
assertSizesMatch(fn, sizes, tensors, i);
|
| 376 |
+
}
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
inline void assertTypeAndSizesMatch(
|
| 380 |
+
const std::function<void(const std::string&)>& fn,
|
| 381 |
+
const at::ArrayRef<at::Tensor> tensors,
|
| 382 |
+
const at::TensorOptions& options,
|
| 383 |
+
const at::IntArrayRef& sizes) {
|
| 384 |
+
for (const auto i : c10::irange(tensors.size())) {
|
| 385 |
+
assertTypeMatch(fn, options, tensors, i);
|
| 386 |
+
assertSizesMatch(fn, sizes, tensors, i);
|
| 387 |
+
}
|
| 388 |
+
}
|
| 389 |
+
|
| 390 |
+
inline void assertTypeAndSizesMatch(
|
| 391 |
+
const std::function<void(const std::string&)>& fn,
|
| 392 |
+
const at::ArrayRef<at::Tensor> tensors) {
|
| 393 |
+
const auto& options = tensors[0].options();
|
| 394 |
+
const auto sizes = tensors[0].sizes();
|
| 395 |
+
assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes);
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
// Copied from ATen/core/functional.h.
|
| 399 |
+
template <typename F, typename T>
|
| 400 |
+
inline auto fmap(T& inputs, const F& fn)
|
| 401 |
+
-> std::vector<decltype(fn(*inputs.begin()))> {
|
| 402 |
+
std::vector<decltype(fn(*inputs.begin()))> r;
|
| 403 |
+
r.reserve(inputs.size());
|
| 404 |
+
for (auto& input : inputs) {
|
| 405 |
+
r.push_back(fn(input));
|
| 406 |
+
}
|
| 407 |
+
return r;
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
// Copied from torch/csrc/utils/tensor_flatten.h.
|
| 411 |
+
inline at::Tensor flattenDenseTensors(at::TensorList tensors) {
|
| 412 |
+
static const auto flatten = [](const at::Tensor& t) {
|
| 413 |
+
return t.contiguous().view({-1});
|
| 414 |
+
};
|
| 415 |
+
if (tensors.size() == 1) {
|
| 416 |
+
return flatten(tensors[0]);
|
| 417 |
+
}
|
| 418 |
+
return at::cat(::c10d::fmap(tensors, flatten));
|
| 419 |
+
}
|
| 420 |
+
|
| 421 |
+
inline at::Tensor newLikeFlat(
|
| 422 |
+
std::vector<std::vector<at::Tensor>>& tensors,
|
| 423 |
+
size_t deviceIdx) {
|
| 424 |
+
if (tensors.empty() || tensors[0].empty()) {
|
| 425 |
+
TORCH_CHECK(false, "Received an empty list");
|
| 426 |
+
}
|
| 427 |
+
if (deviceIdx >= tensors.size()) {
|
| 428 |
+
TORCH_CHECK(false, "Invalid device index");
|
| 429 |
+
}
|
| 430 |
+
auto& t = tensors[deviceIdx][0];
|
| 431 |
+
auto device = t.device();
|
| 432 |
+
for (const auto i : c10::irange(1, tensors[deviceIdx].size())) {
|
| 433 |
+
if (tensors[deviceIdx][i].device() != device) {
|
| 434 |
+
TORCH_CHECK(false, "Expecting all tensors on the same device");
|
| 435 |
+
}
|
| 436 |
+
}
|
| 437 |
+
at::DeviceGuard gpuGuard(device);
|
| 438 |
+
std::vector<int64_t> sizes{static_cast<int64_t>(tensors[deviceIdx].size())};
|
| 439 |
+
std::vector<int64_t> strides{static_cast<int64_t>(t.numel())};
|
| 440 |
+
sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
|
| 441 |
+
strides.insert(strides.end(), t.strides().begin(), t.strides().end());
|
| 442 |
+
return at::empty_strided(
|
| 443 |
+
sizes, strides, t.options().memory_format(std::nullopt));
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
inline at::Tensor newLikeFlat(std::vector<at::Tensor>& tensors) {
|
| 447 |
+
if (tensors.empty()) {
|
| 448 |
+
TORCH_CHECK(false, "Received an empty list");
|
| 449 |
+
}
|
| 450 |
+
auto& t = tensors[0];
|
| 451 |
+
at::DeviceGuard gpuGuard(t.device());
|
| 452 |
+
std::vector<int64_t> sizes{static_cast<int64_t>(tensors.size())};
|
| 453 |
+
sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end());
|
| 454 |
+
return at::empty(sizes, t.options());
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
inline std::vector<std::vector<int64_t>> getSizes(
|
| 458 |
+
const std::vector<at::Tensor>& tensors) {
|
| 459 |
+
std::vector<std::vector<int64_t>> sizes(tensors.size());
|
| 460 |
+
for (const auto i : c10::irange(tensors.size())) {
|
| 461 |
+
sizes[i] = tensors[i].sizes().vec();
|
| 462 |
+
}
|
| 463 |
+
return sizes;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
inline std::vector<int> getDevices(const std::vector<at::Tensor>& tensors) {
|
| 467 |
+
std::vector<int> devices(tensors.size(), -1);
|
| 468 |
+
if (tensors[0].device().is_cuda()) {
|
| 469 |
+
for (const auto i : c10::irange(tensors.size())) {
|
| 470 |
+
// NOLINTNEXTLINE(bugprone-signed-char-misuse)
|
| 471 |
+
devices[i] = tensors[i].storage().device().index();
|
| 472 |
+
}
|
| 473 |
+
}
|
| 474 |
+
return devices;
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
template <typename T>
|
| 478 |
+
inline T* getDataPointer(const at::Tensor& tensor) {
|
| 479 |
+
// This method is only used in ProcessGroupGloo for now. Call sites must make
|
| 480 |
+
// sure that the input tensor is contiguous. It is OK if the tensor does not
|
| 481 |
+
// start from the beginning of the storage. For example, it could come from
|
| 482 |
+
// chunk(..., dim=0)[1]. Hence, we need to use data_ptr() instead of
|
| 483 |
+
// tensor.storage().data()
|
| 484 |
+
// NB: not using tensor.data<T>() because tensor is not aware of gloo::TYPE
|
| 485 |
+
return static_cast<T*>(tensor.data_ptr());
|
| 486 |
+
}
|
| 487 |
+
|
| 488 |
+
template <typename T>
|
| 489 |
+
std::vector<T*> getDataPointers(const std::vector<at::Tensor>& tensors) {
|
| 490 |
+
std::vector<T*> ptrs(tensors.size());
|
| 491 |
+
for (const auto i : c10::irange(tensors.size())) {
|
| 492 |
+
ptrs[i] = getDataPointer<T>(tensors[i]);
|
| 493 |
+
}
|
| 494 |
+
return ptrs;
|
| 495 |
+
}
|
| 496 |
+
|
| 497 |
+
// For alltoall split size sanity check
|
| 498 |
+
inline void checkSplitSizes(
|
| 499 |
+
const std::vector<int64_t>& split_sizes,
|
| 500 |
+
const at::Tensor& tensor,
|
| 501 |
+
int group_size) {
|
| 502 |
+
if (split_sizes.empty()) {
|
| 503 |
+
TORCH_CHECK(
|
| 504 |
+
tensor.size(0) % group_size == 0,
|
| 505 |
+
"Tensor's dim 0 does not divide equally across group size");
|
| 506 |
+
} else {
|
| 507 |
+
TORCH_CHECK(
|
| 508 |
+
split_sizes.size() == static_cast<size_t>(group_size),
|
| 509 |
+
"Number of tensor splits not equal to group size");
|
| 510 |
+
const auto sum = c10::sum_integers(split_sizes);
|
| 511 |
+
TORCH_CHECK(
|
| 512 |
+
sum == tensor.size(0), "Split sizes doesn't match total dim 0 size");
|
| 513 |
+
}
|
| 514 |
+
}
|
| 515 |
+
|
| 516 |
+
// Compute alltoall lengths and offsets, handling multi-dimension tensors
|
| 517 |
+
template <typename T>
|
| 518 |
+
size_t computeLengthsAndOffsets(
|
| 519 |
+
const std::vector<int64_t>& split_sizes,
|
| 520 |
+
const at::Tensor& tensor,
|
| 521 |
+
std::vector<T>* lengths,
|
| 522 |
+
std::vector<T>* offsets) {
|
| 523 |
+
size_t group_size = lengths->size();
|
| 524 |
+
bool equal_splits = false;
|
| 525 |
+
size_t dim0_size = tensor.size(0);
|
| 526 |
+
size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1);
|
| 527 |
+
size_t split_size = 0;
|
| 528 |
+
size_t offset = 0;
|
| 529 |
+
|
| 530 |
+
if (split_sizes.empty()) {
|
| 531 |
+
equal_splits = true;
|
| 532 |
+
split_size = tensor.size(0) / group_size;
|
| 533 |
+
}
|
| 534 |
+
for (const auto i : c10::irange(group_size)) {
|
| 535 |
+
size_t length = row_size * (equal_splits ? split_size : split_sizes[i]);
|
| 536 |
+
(*lengths)[i] = length;
|
| 537 |
+
(*offsets)[i] = offset;
|
| 538 |
+
// TODO: see if we should add overflow protection for offset
|
| 539 |
+
offset += length;
|
| 540 |
+
}
|
| 541 |
+
return offset;
|
| 542 |
+
}
|
| 543 |
+
|
| 544 |
+
template <typename T>
|
| 545 |
+
size_t computeLengthsAndOffsets(
|
| 546 |
+
const std::vector<at::Tensor>& tensors,
|
| 547 |
+
std::vector<T>* lengths,
|
| 548 |
+
std::vector<T>* offsets) {
|
| 549 |
+
size_t group_size = lengths->size();
|
| 550 |
+
size_t offset = 0;
|
| 551 |
+
for (const auto i : c10::irange(group_size)) {
|
| 552 |
+
size_t length = tensors[i].numel();
|
| 553 |
+
(*lengths)[i] = length;
|
| 554 |
+
(*offsets)[i] = offset;
|
| 555 |
+
offset += length;
|
| 556 |
+
}
|
| 557 |
+
return offset;
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
using RankType = uint32_t;
|
| 561 |
+
using SizeType = uint64_t;
|
| 562 |
+
|
| 563 |
+
// `errno` is only meaningful when it fails. E.g., a successful `fork()` sets
|
| 564 |
+
// `errno` to `EINVAL` in child process on some macos
|
| 565 |
+
// (https://stackoverflow.com/a/20295079), and thus `errno` should really only
|
| 566 |
+
// be inspected if an error occurred.
|
| 567 |
+
//
|
| 568 |
+
// `success_cond` is an expression used to check if an error has happend. So for
|
| 569 |
+
// `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function output
|
| 570 |
+
// is stored in variable `__output` and may be used in `success_cond`.
|
| 571 |
+
#ifdef _WIN32
|
| 572 |
+
#define SYSCHECK(expr, success_cond) \
|
| 573 |
+
while (true) { \
|
| 574 |
+
auto __output = (expr); \
|
| 575 |
+
auto errno_local = WSAGetLastError(); \
|
| 576 |
+
(void)__output; \
|
| 577 |
+
if (!(success_cond)) { \
|
| 578 |
+
if (errno == EINTR) { \
|
| 579 |
+
continue; \
|
| 580 |
+
} else if ( \
|
| 581 |
+
errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \
|
| 582 |
+
C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
|
| 583 |
+
} else { \
|
| 584 |
+
C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local)); \
|
| 585 |
+
} \
|
| 586 |
+
} else { \
|
| 587 |
+
break; \
|
| 588 |
+
} \
|
| 589 |
+
}
|
| 590 |
+
#else
|
| 591 |
+
#define SYSCHECK(expr, success_cond) \
|
| 592 |
+
while (true) { \
|
| 593 |
+
auto __output = (expr); \
|
| 594 |
+
(void)__output; \
|
| 595 |
+
if (!(success_cond)) { \
|
| 596 |
+
if (errno == EINTR) { \
|
| 597 |
+
continue; \
|
| 598 |
+
} else if (errno == EAGAIN || errno == EWOULDBLOCK) { \
|
| 599 |
+
C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \
|
| 600 |
+
} else { \
|
| 601 |
+
C10_THROW_ERROR(DistNetworkError, std::strerror(errno)); \
|
| 602 |
+
} \
|
| 603 |
+
} else { \
|
| 604 |
+
break; \
|
| 605 |
+
} \
|
| 606 |
+
}
|
| 607 |
+
#endif
|
| 608 |
+
|
| 609 |
+
// Most functions indicate error by returning `-1`. This is a helper macro for
|
| 610 |
+
// this common case with `SYSCHECK`.
|
| 611 |
+
// Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1
|
| 612 |
+
#define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1)
|
| 613 |
+
|
| 614 |
+
namespace tcputil {
|
| 615 |
+
|
| 616 |
+
// Send and receive
|
| 617 |
+
template <typename T>
|
| 618 |
+
void sendBytes(
|
| 619 |
+
int socket,
|
| 620 |
+
const T* buffer,
|
| 621 |
+
size_t length,
|
| 622 |
+
bool moreData = false) {
|
| 623 |
+
size_t bytesToSend = sizeof(T) * length;
|
| 624 |
+
if (bytesToSend == 0) {
|
| 625 |
+
return;
|
| 626 |
+
}
|
| 627 |
+
|
| 628 |
+
auto currentBytes = reinterpret_cast<const char*>(buffer);
|
| 629 |
+
|
| 630 |
+
int flags = 0;
|
| 631 |
+
|
| 632 |
+
#ifdef MSG_MORE
|
| 633 |
+
if (moreData) { // there is more data to send
|
| 634 |
+
flags |= MSG_MORE;
|
| 635 |
+
}
|
| 636 |
+
#endif
|
| 637 |
+
|
| 638 |
+
// Ignore SIGPIPE as the send() return value is always checked for error
|
| 639 |
+
#ifdef MSG_NOSIGNAL
|
| 640 |
+
flags |= MSG_NOSIGNAL;
|
| 641 |
+
#endif
|
| 642 |
+
|
| 643 |
+
while (bytesToSend > 0) {
|
| 644 |
+
ssize_t bytesSent = 0;
|
| 645 |
+
SYSCHECK_ERR_RETURN_NEG1(
|
| 646 |
+
bytesSent = ::send(socket, currentBytes, bytesToSend, flags))
|
| 647 |
+
if (bytesSent == 0) {
|
| 648 |
+
C10_THROW_ERROR(DistNetworkError, "failed to send, sent 0 bytes");
|
| 649 |
+
}
|
| 650 |
+
|
| 651 |
+
bytesToSend -= bytesSent;
|
| 652 |
+
currentBytes += bytesSent;
|
| 653 |
+
}
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
template <typename T>
|
| 657 |
+
void recvBytes(int socket, T* buffer, size_t length) {
|
| 658 |
+
size_t bytesToReceive = sizeof(T) * length;
|
| 659 |
+
if (bytesToReceive == 0) {
|
| 660 |
+
return;
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
auto currentBytes = reinterpret_cast<char*>(buffer);
|
| 664 |
+
|
| 665 |
+
while (bytesToReceive > 0) {
|
| 666 |
+
ssize_t bytesReceived = 0;
|
| 667 |
+
SYSCHECK_ERR_RETURN_NEG1(
|
| 668 |
+
bytesReceived = recv(socket, currentBytes, bytesToReceive, 0))
|
| 669 |
+
if (bytesReceived == 0) {
|
| 670 |
+
C10_THROW_ERROR(DistNetworkError, "failed to recv, got 0 bytes");
|
| 671 |
+
}
|
| 672 |
+
|
| 673 |
+
bytesToReceive -= bytesReceived;
|
| 674 |
+
currentBytes += bytesReceived;
|
| 675 |
+
}
|
| 676 |
+
}
|
| 677 |
+
|
| 678 |
+
// send a vector's length and data
|
| 679 |
+
template <typename T>
|
| 680 |
+
void sendVector(int socket, const std::vector<T>& vec, bool moreData = false) {
|
| 681 |
+
SizeType size = vec.size();
|
| 682 |
+
sendBytes<SizeType>(socket, &size, 1, true);
|
| 683 |
+
sendBytes<T>(socket, vec.data(), size, moreData);
|
| 684 |
+
}
|
| 685 |
+
|
| 686 |
+
// receive a vector as sent in sendVector
|
| 687 |
+
template <typename T>
|
| 688 |
+
std::vector<T> recvVector(int socket) {
|
| 689 |
+
SizeType valueSize = 0;
|
| 690 |
+
recvBytes<SizeType>(socket, &valueSize, 1);
|
| 691 |
+
std::vector<T> value(valueSize);
|
| 692 |
+
recvBytes<T>(socket, value.data(), value.size());
|
| 693 |
+
return value;
|
| 694 |
+
}
|
| 695 |
+
|
| 696 |
+
// this is only for convenience when sending rvalues
|
| 697 |
+
template <typename T>
|
| 698 |
+
void sendValue(int socket, const T& value, bool moreData = false) {
|
| 699 |
+
sendBytes<T>(socket, &value, 1, moreData);
|
| 700 |
+
}
|
| 701 |
+
|
| 702 |
+
template <typename T>
|
| 703 |
+
T recvValue(int socket) {
|
| 704 |
+
T value;
|
| 705 |
+
recvBytes<T>(socket, &value, 1);
|
| 706 |
+
return value;
|
| 707 |
+
}
|
| 708 |
+
|
| 709 |
+
// send a string's length and data
|
| 710 |
+
inline void sendString(
|
| 711 |
+
int socket,
|
| 712 |
+
const std::string& str,
|
| 713 |
+
bool moreData = false) {
|
| 714 |
+
SizeType size = str.size();
|
| 715 |
+
sendBytes<SizeType>(socket, &size, 1, true);
|
| 716 |
+
sendBytes<char>(socket, str.data(), size, moreData);
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
// receive a string as sent in sendString
|
| 720 |
+
inline std::string recvString(int socket) {
|
| 721 |
+
SizeType valueSize = 0;
|
| 722 |
+
recvBytes<SizeType>(socket, &valueSize, 1);
|
| 723 |
+
std::vector<char> value(valueSize);
|
| 724 |
+
recvBytes<char>(socket, value.data(), value.size());
|
| 725 |
+
return std::string(value.data(), value.size());
|
| 726 |
+
}
|
| 727 |
+
|
| 728 |
+
} // namespace tcputil
|
| 729 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <chrono>
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
constexpr auto kNoTimeout = std::chrono::milliseconds(0);
|
| 9 |
+
|
| 10 |
+
namespace c10d {
|
| 11 |
+
|
| 12 |
+
constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY";
|
| 13 |
+
|
| 14 |
+
enum class OpType : std::uint8_t {
|
| 15 |
+
BROADCAST = 0,
|
| 16 |
+
ALLREDUCE = 1,
|
| 17 |
+
ALLREDUCE_COALESCED = 2,
|
| 18 |
+
REDUCE = 3,
|
| 19 |
+
ALLGATHER = 4,
|
| 20 |
+
_ALLGATHER_BASE = 5,
|
| 21 |
+
ALLGATHER_COALESCED = 6,
|
| 22 |
+
GATHER = 7,
|
| 23 |
+
SCATTER = 8,
|
| 24 |
+
REDUCE_SCATTER = 9,
|
| 25 |
+
ALLTOALL_BASE = 10,
|
| 26 |
+
ALLTOALL = 11,
|
| 27 |
+
SEND = 12,
|
| 28 |
+
RECV = 13,
|
| 29 |
+
RECVANYSOURCE = 14,
|
| 30 |
+
BARRIER = 15,
|
| 31 |
+
_REDUCE_SCATTER_BASE = 16,
|
| 32 |
+
COALESCED = 17,
|
| 33 |
+
_ALLREDUCE_SPARSE = 18,
|
| 34 |
+
UNKNOWN = 100,
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
// Converts OpType to human readable string.
|
| 38 |
+
TORCH_API std::string opTypeToString(OpType opType);
|
| 39 |
+
|
| 40 |
+
// Whether or not an OP is an p2p op (SEND, RECV, RECVANYSOURCE)
|
| 41 |
+
TORCH_API bool isP2POp(OpType opType, bool batchP2P = false);
|
| 42 |
+
|
| 43 |
+
// Please do not use Work API, it is going away, to be
|
| 44 |
+
// replaced by ivalue::Future.
|
| 45 |
+
// Python binding for this class might change, please do not assume
|
| 46 |
+
// this will be bound using pybind.
|
| 47 |
+
class TORCH_API Work : public torch::CustomClassHolder {
|
| 48 |
+
public:
|
| 49 |
+
Work(
|
| 50 |
+
int rank = -1,
|
| 51 |
+
OpType opType = OpType::UNKNOWN,
|
| 52 |
+
const char* profilingTitle = nullptr,
|
| 53 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 54 |
+
std::nullopt);
|
| 55 |
+
|
| 56 |
+
~Work() override;
|
| 57 |
+
|
| 58 |
+
// Checks if request has completed. Non-blocking operation.
|
| 59 |
+
virtual bool isCompleted();
|
| 60 |
+
|
| 61 |
+
// Returns if the work completed successfully.
|
| 62 |
+
// If false, the exception function can be called to get details.
|
| 63 |
+
virtual bool isSuccess() const;
|
| 64 |
+
|
| 65 |
+
// Returns exception if isSuccess() returned false.
|
| 66 |
+
virtual std::exception_ptr exception() const;
|
| 67 |
+
|
| 68 |
+
// Returns source rank if this objects represents a recv-from-any.
|
| 69 |
+
virtual int sourceRank() const;
|
| 70 |
+
|
| 71 |
+
// Returns result tensors, if applicable.
|
| 72 |
+
// If work is not supposed to have result, we return empty list.
|
| 73 |
+
virtual std::vector<at::Tensor> result();
|
| 74 |
+
|
| 75 |
+
// Ensures that operations on the output tensors that are invoked
|
| 76 |
+
// after this function returns are correctly sequenced after the
|
| 77 |
+
// asynchronous completion of this work.
|
| 78 |
+
//
|
| 79 |
+
// For CUDA tensors, it inserts stream synchronization such that
|
| 80 |
+
// the streams of the caller wait for completion of the
|
| 81 |
+
// asynchronous operations on the destination tensors.
|
| 82 |
+
//
|
| 83 |
+
// For CPU tensors, it is currently a nop.
|
| 84 |
+
//
|
| 85 |
+
// This function should only be used if the caller polls for
|
| 86 |
+
// completion through the `isCompleted` function, it has returned
|
| 87 |
+
// true, and the `isSuccess` function also has returned true.
|
| 88 |
+
//
|
| 89 |
+
virtual void synchronize();
|
| 90 |
+
|
| 91 |
+
// Waits until request completes. Blocking operation.
|
| 92 |
+
// Throws if the work completed with an exception.
|
| 93 |
+
// Returns false if the work is aborted.
|
| 94 |
+
// Otherwise, it always returns true, indicating the work is completed.
|
| 95 |
+
//
|
| 96 |
+
// Functionally equivalent to:
|
| 97 |
+
//
|
| 98 |
+
// while (!isCompleted()) { /* nop */ }
|
| 99 |
+
// auto success = isSuccess();
|
| 100 |
+
// if (!success) { std::rethrow_exception(exception()); }
|
| 101 |
+
// return success;
|
| 102 |
+
//
|
| 103 |
+
virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout);
|
| 104 |
+
|
| 105 |
+
virtual void abort();
|
| 106 |
+
|
| 107 |
+
// Returns a Future object that will be associated with the completion of
|
| 108 |
+
// work. Only NCCL backend is currently supported.
|
| 109 |
+
virtual c10::intrusive_ptr<c10::ivalue::Future> getFuture();
|
| 110 |
+
|
| 111 |
+
virtual float getDuration() const;
|
| 112 |
+
|
| 113 |
+
virtual uint64_t getSequencenumber() const;
|
| 114 |
+
|
| 115 |
+
OpType retrieveOpType() const;
|
| 116 |
+
|
| 117 |
+
static c10::intrusive_ptr<Work> create_from_future(
|
| 118 |
+
const c10::intrusive_ptr<c10::ivalue::Future>&);
|
| 119 |
+
|
| 120 |
+
protected:
|
| 121 |
+
// Completes the work object and optionally sets the exception in a
|
| 122 |
+
// thread-safe manner. Notifies all waiting condition variables as well.
|
| 123 |
+
void finish(std::exception_ptr exception = nullptr);
|
| 124 |
+
|
| 125 |
+
// Similar to finish, but throws an exception if one is already set or
|
| 126 |
+
// provided by the user.
|
| 127 |
+
void finishAndThrow(std::exception_ptr exception);
|
| 128 |
+
|
| 129 |
+
mutable std::mutex mutex_;
|
| 130 |
+
std::condition_variable cv_;
|
| 131 |
+
bool completed_ = false;
|
| 132 |
+
std::exception_ptr exception_;
|
| 133 |
+
|
| 134 |
+
// Current rank of the node.
|
| 135 |
+
const int rank_;
|
| 136 |
+
|
| 137 |
+
// Operation type that this work object refers to.
|
| 138 |
+
OpType opType_;
|
| 139 |
+
|
| 140 |
+
// When profiling, the callback to record end of operation event. This
|
| 141 |
+
// callback needs to be called when collective operation is complete.
|
| 142 |
+
std::function<void()> recordFunctionEndCallback_;
|
| 143 |
+
};
|
| 144 |
+
|
| 145 |
+
struct TORCH_API WorkInfo {
|
| 146 |
+
WorkInfo(
|
| 147 |
+
const OpType& opType,
|
| 148 |
+
const uint64_t seq,
|
| 149 |
+
const std::chrono::time_point<std::chrono::system_clock>& timeStarted,
|
| 150 |
+
const std::chrono::time_point<std::chrono::system_clock>& timeFinished,
|
| 151 |
+
const std::chrono::duration<float>& activeDuration)
|
| 152 |
+
: opType(opType),
|
| 153 |
+
seq(seq),
|
| 154 |
+
timeStarted(timeStarted),
|
| 155 |
+
timeFinished(timeFinished),
|
| 156 |
+
activeDuration(activeDuration) {}
|
| 157 |
+
|
| 158 |
+
OpType opType;
|
| 159 |
+
uint64_t seq;
|
| 160 |
+
std::chrono::time_point<std::chrono::system_clock> timeStarted;
|
| 161 |
+
std::chrono::time_point<std::chrono::system_clock> timeFinished;
|
| 162 |
+
std::chrono::duration<float> activeDuration;
|
| 163 |
+
};
|
| 164 |
+
|
| 165 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Meta Platforms, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <c10/macros/Macros.h>
|
| 10 |
+
|
| 11 |
+
namespace c10d {
|
| 12 |
+
|
| 13 |
+
enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };
|
| 14 |
+
|
| 15 |
+
TORCH_API void setDebugLevel(DebugLevel level);
|
| 16 |
+
|
| 17 |
+
// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
|
| 18 |
+
// environment variable.
|
| 19 |
+
TORCH_API void setDebugLevelFromEnvironment();
|
| 20 |
+
|
| 21 |
+
TORCH_API DebugLevel debug_level() noexcept;
|
| 22 |
+
|
| 23 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <cstring>
|
| 10 |
+
#include <system_error>
|
| 11 |
+
|
| 12 |
+
#include <fmt/format.h>
|
| 13 |
+
|
| 14 |
+
namespace fmt {
|
| 15 |
+
|
| 16 |
+
template <>
|
| 17 |
+
struct formatter<std::error_category> {
|
| 18 |
+
constexpr decltype(auto) parse(format_parse_context& ctx) const {
|
| 19 |
+
return ctx.begin();
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
template <typename FormatContext>
|
| 23 |
+
decltype(auto) format(const std::error_category& cat, FormatContext& ctx)
|
| 24 |
+
const {
|
| 25 |
+
if (std::strcmp(cat.name(), "generic") == 0) {
|
| 26 |
+
return fmt::format_to(ctx.out(), "errno");
|
| 27 |
+
} else {
|
| 28 |
+
return fmt::format_to(ctx.out(), "{} error", cat.name());
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
template <>
|
| 34 |
+
struct formatter<std::error_code> {
|
| 35 |
+
constexpr decltype(auto) parse(format_parse_context& ctx) const {
|
| 36 |
+
return ctx.begin();
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
template <typename FormatContext>
|
| 40 |
+
decltype(auto) format(const std::error_code& err, FormatContext& ctx) const {
|
| 41 |
+
return fmt::format_to(
|
| 42 |
+
ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message());
|
| 43 |
+
}
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
} // namespace fmt
|
| 47 |
+
|
| 48 |
+
namespace c10d {
|
| 49 |
+
namespace detail {
|
| 50 |
+
|
| 51 |
+
inline std::error_code lastError() noexcept {
|
| 52 |
+
return std::error_code{errno, std::generic_category()};
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
} // namespace detail
|
| 56 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp
ADDED
|
@@ -0,0 +1,587 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/ScalarType.h>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
#include <memory>
|
| 6 |
+
#include <mutex>
|
| 7 |
+
#include <tuple>
|
| 8 |
+
#include <unordered_map>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
#include <ATen/core/ivalue_inl.h>
|
| 12 |
+
#include <c10/macros/Macros.h>
|
| 13 |
+
#include <c10/util/intrusive_ptr.h>
|
| 14 |
+
#include <torch/csrc/autograd/function.h>
|
| 15 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 16 |
+
#include <torch/csrc/autograd/variable.h>
|
| 17 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 18 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 19 |
+
#include <torch/csrc/distributed/c10d/comm.hpp>
|
| 20 |
+
#include <torch/csrc/distributed/c10d/debug.h>
|
| 21 |
+
#include <torch/csrc/distributed/c10d/default_comm_hooks.hpp>
|
| 22 |
+
#include <torch/csrc/distributed/c10d/reducer_timer.hpp>
|
| 23 |
+
#ifndef _WIN32
|
| 24 |
+
#include <torch/csrc/distributed/autograd/context/context.h>
|
| 25 |
+
#endif
|
| 26 |
+
|
| 27 |
+
namespace c10d {
|
| 28 |
+
|
| 29 |
+
constexpr int kDefaultFirstBucketBytes = int(1024 * 1024);
|
| 30 |
+
constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024);
|
| 31 |
+
// Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations.
|
| 32 |
+
constexpr int kDDPRuntimeLoggingSampleRate = 100;
|
| 33 |
+
|
| 34 |
+
// Forward declaration
|
| 35 |
+
class Logger;
|
| 36 |
+
|
| 37 |
+
// Local accumulator type for a single bucket.
|
| 38 |
+
struct BucketAccumulator {
|
| 39 |
+
std::vector<size_t> indices;
|
| 40 |
+
size_t size = 0;
|
| 41 |
+
size_t size_limit = 0;
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
class TORCH_API Reducer {
|
| 45 |
+
public:
|
| 46 |
+
// The constructor takes a list of variables (i.e. parameters) for this
|
| 47 |
+
// process's single model replica (as DDP assumes single-process
|
| 48 |
+
// single-device). The bucket assignment for this reducer, `bucket_indices`,
|
| 49 |
+
// is specified as a list of buckets, each of which is specified as a list of
|
| 50 |
+
// indices into the bucket's `variables` list.
|
| 51 |
+
explicit Reducer(
|
| 52 |
+
std::vector<at::Tensor> params,
|
| 53 |
+
std::vector<std::vector<size_t>> bucket_indices,
|
| 54 |
+
const std::vector<size_t>& per_bucket_size_limits,
|
| 55 |
+
c10::intrusive_ptr<c10d::ProcessGroup> process_group,
|
| 56 |
+
std::vector<bool> expect_sparse_gradients,
|
| 57 |
+
int64_t bucket_bytes_cap,
|
| 58 |
+
bool find_unused_parameters,
|
| 59 |
+
bool gradient_as_bucket_view,
|
| 60 |
+
std::unordered_map<size_t, std::string> param_names,
|
| 61 |
+
int64_t first_bucket_bytes_cap);
|
| 62 |
+
|
| 63 |
+
~Reducer() noexcept(false);
|
| 64 |
+
|
| 65 |
+
// To (re-)initialize bucket assignment, pass a list of buckets, each of
|
| 66 |
+
// which is specified by a list of indices in the bucket's `variables` list.
|
| 67 |
+
// This function performs validation that the variables within a bucket
|
| 68 |
+
// all live on the same device and have the same dimensionality.
|
| 69 |
+
void initialize_buckets(std::vector<std::vector<size_t>> bucket_indices);
|
| 70 |
+
|
| 71 |
+
void autograd_hook(size_t index);
|
| 72 |
+
|
| 73 |
+
// This function is called when the forward function has produced an output,
|
| 74 |
+
// and the user wishes to reduce gradients in the backwards pass.
|
| 75 |
+
// If they don't, and wish to accumulate gradients before reducing them,
|
| 76 |
+
// a call to this function can simply be omitted.
|
| 77 |
+
void prepare_for_backward(const std::vector<at::Tensor>& outputs);
|
| 78 |
+
|
| 79 |
+
// Called at the beginning of forward() inside DistributedDataParallel,
|
| 80 |
+
// right now it captures the starting time of forward in each iteration.
|
| 81 |
+
void prepare_for_forward();
|
| 82 |
+
|
| 83 |
+
// Returns the relative time in nanoseconds when gradients were ready,
|
| 84 |
+
// with respect to the time `prepare_for_backward` was called. The
|
| 85 |
+
// vector is for parameters for a single model replica.
|
| 86 |
+
std::vector<int64_t> get_backward_stats() const {
|
| 87 |
+
return backward_stats_;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
// Registers a hook to the reducer. The hook is `CommHookInterface`
|
| 91 |
+
// type to allow both Python and CPP hooks. This function can only
|
| 92 |
+
// be called once before calling backward.
|
| 93 |
+
// Cannot combine with the call of `register_builtin_comm_hook`.
|
| 94 |
+
void register_comm_hook(std::unique_ptr<CommHookInterface> iface);
|
| 95 |
+
|
| 96 |
+
// Registers a built-in C++ comm hook to the reducer. This function can only
|
| 97 |
+
// be called once before calling backward.
|
| 98 |
+
// Cannot combine with the call of `register_comm_hook`.
|
| 99 |
+
void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type);
|
| 100 |
+
|
| 101 |
+
// Informs reducer that optimizer is running in backward, so gradients
|
| 102 |
+
// don't need to be copied from buckets as the optimizer would've already
|
| 103 |
+
// been applied.
|
| 104 |
+
void set_optimizer_in_backward() {
|
| 105 |
+
optim_in_backward_ = true;
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
// Runs allreduce or installed communication hook given GradBucket instance.
|
| 109 |
+
c10::intrusive_ptr<c10::ivalue::Future> run_comm_hook(
|
| 110 |
+
GradBucket& grad_bucket);
|
| 111 |
+
|
| 112 |
+
// Runs default allreduce hook.
|
| 113 |
+
c10::intrusive_ptr<c10::ivalue::Future> run_allreduce_hook(
|
| 114 |
+
GradBucket& grad_bucket);
|
| 115 |
+
|
| 116 |
+
// Returns gradient buckets in sequential order of buckets_. This is the order
|
| 117 |
+
// in which buckets are reduced across processes. If return_zero_tensors=true,
|
| 118 |
+
// will return zero tensors of the same shape instead of the true tensors.
|
| 119 |
+
std::vector<c10d::GradBucket> get_grad_buckets(
|
| 120 |
+
bool return_zero_tensors = true) const;
|
| 121 |
+
|
| 122 |
+
// Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_
|
| 123 |
+
// according to when tensors received grads in the backward pass.
|
| 124 |
+
// TODO this function makes broadcast communication call and
|
| 125 |
+
// could be overlapped with next forward() call, thus
|
| 126 |
+
// it could be async. Will make it async when rebuilding buckets for
|
| 127 |
+
// find_unused_parameters = true case, as we could rebuild buckets more than
|
| 128 |
+
// once for find_unused_parameters = true case, where subgraphs are trained
|
| 129 |
+
// and parameter indices order may change more frequently.
|
| 130 |
+
// For find_unused_parameters = false case, buckets are only rebuilt once,
|
| 131 |
+
// the performance cost is negligible. Returns true if the buckets were
|
| 132 |
+
// rebuilt.
|
| 133 |
+
bool rebuild_buckets();
|
| 134 |
+
|
| 135 |
+
void setSparseMetadata(std::map<std::string, at::Tensor>& metadata);
|
| 136 |
+
|
| 137 |
+
// Install futures that should be awaited at end of backwards. Currently these
|
| 138 |
+
// are only used by user-defined custom buffer reduction hooks, but can be
|
| 139 |
+
// generalized to any user-originating futures that need to be awaited.
|
| 140 |
+
void install_futures(c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs);
|
| 141 |
+
|
| 142 |
+
// Returns true if we should rebuild buckets, else false. We only rebuild
|
| 143 |
+
// buckets once after the first iteration and never rebuild them if
|
| 144 |
+
// find_unused_parameters_.
|
| 145 |
+
inline bool should_rebuild_buckets() const {
|
| 146 |
+
return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
// Pushes all parameters to be rebuilt.
|
| 150 |
+
void push_rebuilt_params_for_all_indices();
|
| 151 |
+
|
| 152 |
+
// Creates and sets ForwardPassWorkHandle given a Work and the
|
| 153 |
+
// corresponding tensor being reduced.
|
| 154 |
+
void set_forward_pass_work_handle(
|
| 155 |
+
c10::intrusive_ptr<c10d::Work> forwardPassWorkHandle,
|
| 156 |
+
bool useStaticWorldSize);
|
| 157 |
+
|
| 158 |
+
// Retrieve on-device tensors used to track locally unused parameters. It is
|
| 159 |
+
// a tensor where index i = 1 if the Variable with that index has been used.
|
| 160 |
+
at::Tensor get_local_used_map_on_device() const;
|
| 161 |
+
|
| 162 |
+
// An function for users to set sample_rate of collecting
|
| 163 |
+
// runtime stats. The time stats will be recorded for the
|
| 164 |
+
// first 10 iterations, after 10 iterations time stats will be
|
| 165 |
+
// recorded once every "sample_rate" training iterations.
|
| 166 |
+
void set_ddp_runtime_logging_sample_rate(int sample_rate);
|
| 167 |
+
|
| 168 |
+
// Specify the training graph is static.
|
| 169 |
+
void set_static_graph();
|
| 170 |
+
|
| 171 |
+
// Delay all reduce to be after all gradients' calculation is complete.
|
| 172 |
+
void delay_all_reduce();
|
| 173 |
+
|
| 174 |
+
void set_mixed_precision_param_dtype(c10::ScalarType dtype);
|
| 175 |
+
|
| 176 |
+
// Weak reference to associated DDP logger. The reference is weak to avoid
|
| 177 |
+
// refcycle between reducer and logger.
|
| 178 |
+
void set_logger(std::weak_ptr<c10d::Logger> logger);
|
| 179 |
+
|
| 180 |
+
// When graph is not explicitly set by user as static and has unused
|
| 181 |
+
// parameters, this will return whether the graph has been static until the
|
| 182 |
+
// current iteration, which means unused params set has not changed.
|
| 183 |
+
bool ddp_graph_static();
|
| 184 |
+
|
| 185 |
+
// Removes autograd hooks registered by the Reducer on the model parameters.
|
| 186 |
+
void remove_autograd_hooks();
|
| 187 |
+
|
| 188 |
+
// Checks whether or not the reducer has finalized the current backward
|
| 189 |
+
// iteration.
|
| 190 |
+
void check_finalized();
|
| 191 |
+
|
| 192 |
+
// Updates the underlying process group used by DDP with the new process
|
| 193 |
+
// group.
|
| 194 |
+
void update_process_group(
|
| 195 |
+
c10::intrusive_ptr<c10d::ProcessGroup> new_process_group);
|
| 196 |
+
|
| 197 |
+
// Resets reducer state.
|
| 198 |
+
void reset_state();
|
| 199 |
+
|
| 200 |
+
protected:
|
| 201 |
+
// Forward declaration.
|
| 202 |
+
struct Bucket;
|
| 203 |
+
|
| 204 |
+
void push_rebuilt_params(const size_t& index);
|
| 205 |
+
|
| 206 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 207 |
+
mutable std::mutex mutex_;
|
| 208 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 209 |
+
const std::vector<at::Tensor> params_;
|
| 210 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 211 |
+
c10::intrusive_ptr<::c10d::ProcessGroup> process_group_;
|
| 212 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 213 |
+
std::vector<bool> expect_sparse_gradients_;
|
| 214 |
+
|
| 215 |
+
std::vector<std::shared_ptr<torch::autograd::Node>>
|
| 216 |
+
grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 217 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 218 |
+
std::unordered_map<torch::autograd::Node*, size_t> gradAccToVariableMap_;
|
| 219 |
+
std::vector<std::pair<uintptr_t, std::shared_ptr<torch::autograd::Node>>>
|
| 220 |
+
hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 221 |
+
|
| 222 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 223 |
+
bool expect_autograd_hooks_;
|
| 224 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 225 |
+
bool require_finalize_;
|
| 226 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 227 |
+
size_t next_bucket_;
|
| 228 |
+
|
| 229 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 230 |
+
bool has_marked_unused_parameters_;
|
| 231 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 232 |
+
const bool find_unused_parameters_;
|
| 233 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 234 |
+
const bool gradient_as_bucket_view_;
|
| 235 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 236 |
+
std::vector<size_t> unused_parameters_;
|
| 237 |
+
// Previous iteration's unused params, used for checking if unused parameters
|
| 238 |
+
// change between iterations. Only filled during the first backwards call.
|
| 239 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 240 |
+
std::vector<size_t> prev_iteration_unused_parameters_;
|
| 241 |
+
// Whether graph is static or not. When user does not explicitly set static
|
| 242 |
+
// graph, the only possible dynamism is set of unused parameters changing
|
| 243 |
+
// between iterations which is tracked by this flag.
|
| 244 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 245 |
+
bool ddp_graph_static_{true};
|
| 246 |
+
// Locally used parameter maps indicating if parameters are used locally
|
| 247 |
+
// during the current iteration or no_sync session if no_sync is on.
|
| 248 |
+
// Each map is a one-dim int32 tensor of number of parameters. These tensors
|
| 249 |
+
// are marked in autograd_hook to indicate the corresponding param has been
|
| 250 |
+
// used, and get allreduced in the end of backward step of current iteration
|
| 251 |
+
// or no_sync session for figuring out the globally unused parameters.
|
| 252 |
+
//
|
| 253 |
+
// local_used_map_: CPU tensor for bookkeeping locally used params
|
| 254 |
+
// local_used_map_dev_: dev tensor for reducing globally unused params
|
| 255 |
+
at::Tensor local_used_map_;
|
| 256 |
+
at::Tensor local_used_map_dev_;
|
| 257 |
+
// Indicate that reduction is done and D2H copy is done as well.
|
| 258 |
+
bool local_used_map_reduced_;
|
| 259 |
+
|
| 260 |
+
// Weak pointer to associated DDP logger.
|
| 261 |
+
std::weak_ptr<c10d::Logger> logger_;
|
| 262 |
+
// List of futures installed by Reducer::install_futures that should be
|
| 263 |
+
// awaited at the end of backwards pass.
|
| 264 |
+
std::optional<c10::List<c10::intrusive_ptr<c10::ivalue::Future>>>
|
| 265 |
+
installed_futures_{std::nullopt};
|
| 266 |
+
// Mixed precision parameter dtype for bucket type checking.
|
| 267 |
+
std::optional<c10::ScalarType> mixed_precision_param_dtype_{std::nullopt};
|
| 268 |
+
|
| 269 |
+
// Work handle for allreduce on local_used_map_
|
| 270 |
+
c10::intrusive_ptr<c10d::Work> local_used_work_;
|
| 271 |
+
|
| 272 |
+
void mark_variable_ready_dense(size_t variable_index);
|
| 273 |
+
|
| 274 |
+
void mark_variable_ready_sparse(size_t variable_index);
|
| 275 |
+
|
| 276 |
+
void mark_variable_ready(size_t variable_index);
|
| 277 |
+
|
| 278 |
+
void mark_bucket_ready(size_t bucket_index);
|
| 279 |
+
|
| 280 |
+
void finalize_bucket_dense(Bucket& bucket);
|
| 281 |
+
|
| 282 |
+
void finalize_backward();
|
| 283 |
+
|
| 284 |
+
// Returns list of model parameters corresponding to the given bucket.
|
| 285 |
+
// bucket_index is a key to cache after buckets are rebuilt, after which this
|
| 286 |
+
// mapping never changes.
|
| 287 |
+
std::vector<at::Tensor> get_variables_for_bucket(
|
| 288 |
+
size_t bucket_index,
|
| 289 |
+
const Bucket& bucket) const;
|
| 290 |
+
|
| 291 |
+
// Asserts that the reduction for the previous iteration has finished before
|
| 292 |
+
// rebuilding buckets or kicking off the next one.
|
| 293 |
+
void ensure_prior_reduction_finished();
|
| 294 |
+
|
| 295 |
+
// Broadcast rebuilt buckets from rank 0 to other ranks before initializing
|
| 296 |
+
// the buckets
|
| 297 |
+
void sync_bucket_indices(std::vector<std::vector<size_t>>& bucket_indices);
|
| 298 |
+
|
| 299 |
+
// We'd like to use DistAutogradContext::GradCallback here but dist autograd
|
| 300 |
+
// doesn't exist under Windows. So we just directly use the concrete type but
|
| 301 |
+
// to preserve and enforce our original intent we do a static assert when dist
|
| 302 |
+
// autograd is available.
|
| 303 |
+
using GradCallback = std::function<bool(at::Tensor&)>;
|
| 304 |
+
#ifndef _WIN32
|
| 305 |
+
static_assert(
|
| 306 |
+
std::is_same_v<
|
| 307 |
+
GradCallback,
|
| 308 |
+
torch::distributed::autograd::DistAutogradContext::GradCallback>);
|
| 309 |
+
#endif
|
| 310 |
+
void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb);
|
| 311 |
+
|
| 312 |
+
// This function is called inside `initialize_buckets()`. It initializes both
|
| 313 |
+
// `bucket_views_in` and `bucket_views_out` with views for each variable's
|
| 314 |
+
// gradient into the bucket's flattened `gradients` tensor. Views serve as
|
| 315 |
+
// entry points to `copy_()` each grad's data in/out of the flattened
|
| 316 |
+
// `gradients` tensor.
|
| 317 |
+
void initialize_bucket_views(Bucket& bucket);
|
| 318 |
+
|
| 319 |
+
// This function is called inside `finalize_backward`, it happens only if
|
| 320 |
+
// DDP communication hook was registered to recreate just bucket_views_out
|
| 321 |
+
// with the result of `future_work`.
|
| 322 |
+
void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor);
|
| 323 |
+
|
| 324 |
+
// If gradient_as_bucket_view_ is false, after allreduce buckets,
|
| 325 |
+
// copy bucket results back to grads.
|
| 326 |
+
void copy_bucket_to_grad(
|
| 327 |
+
at::Tensor& variable,
|
| 328 |
+
Reducer::Bucket& bucket,
|
| 329 |
+
size_t intra_bucket_index,
|
| 330 |
+
bool global_unused);
|
| 331 |
+
// Check layout of grad and bucket_view before copying the grad to bucket.
|
| 332 |
+
void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view);
|
| 333 |
+
|
| 334 |
+
// A bucket contains [1..N] gradients to be reduced, where the gradients
|
| 335 |
+
// have the same dtype and device.
|
| 336 |
+
// Coalescing gradients together before reducing can result in lower overhead
|
| 337 |
+
// and/or faster time to completion. Coalescing requires the constituent
|
| 338 |
+
// gradients to have the same dtype and device, and the resulting flattened
|
| 339 |
+
// tensor uses that common dtype and device. The flattened tensor is filled
|
| 340 |
+
// as the corresponding gradients are computed (triggered by autograd hooks),
|
| 341 |
+
// and the buckets are reduced in a predetermined order consistent across
|
| 342 |
+
// processes.
|
| 343 |
+
struct Bucket {
|
| 344 |
+
// Gradients of the bucket flattened into a 1-dimensional tensor
|
| 345 |
+
at::Tensor gradients;
|
| 346 |
+
|
| 347 |
+
// Views into the `gradients` tensor for each individual gradient
|
| 348 |
+
// Each view is created with layout (size and stride) matching the
|
| 349 |
+
// gradient's expected layout (see the "Gradient Layout Contract" in
|
| 350 |
+
// torch/csrc/autograd/functions/accumulate_grad.h).
|
| 351 |
+
// `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])`
|
| 352 |
+
// provide convenient ways to copy gradient data in/out of `gradients`,
|
| 353 |
+
// respectively.
|
| 354 |
+
// We keep both `bucket_views_in` and `bucket_views_out` because
|
| 355 |
+
// registering a DDP communication hook may re-initialize
|
| 356 |
+
// `bucket_views_out` with the value of the hook's `future_work` but we
|
| 357 |
+
// still need separate views into the bucket's original flattened gradient
|
| 358 |
+
// to copy in gradient data.
|
| 359 |
+
std::vector<at::Tensor> bucket_views_in;
|
| 360 |
+
std::vector<at::Tensor> bucket_views_out;
|
| 361 |
+
|
| 362 |
+
// Variables whose gradients are held in this bucket
|
| 363 |
+
// We use refcounted tensors here so that we can easily unflatten the
|
| 364 |
+
// bucket's flattened `gradients` tensor into the participating variables
|
| 365 |
+
// after reduction has completed.
|
| 366 |
+
std::vector<at::Tensor> variables;
|
| 367 |
+
|
| 368 |
+
// Per-variable offset/length into the flattened `gradients` tensor and
|
| 369 |
+
// the corresponding `GradBucket` instance for communication hooks
|
| 370 |
+
std::vector<size_t> offsets;
|
| 371 |
+
std::vector<size_t> lengths;
|
| 372 |
+
|
| 373 |
+
// Per-variable sizes slicing into the bucket's `gradients` tensor
|
| 374 |
+
std::vector<c10::IntArrayRef> sizes_vec;
|
| 375 |
+
|
| 376 |
+
// Number of gradients left to be computed before the bucket is ready to
|
| 377 |
+
// be reduced
|
| 378 |
+
size_t pending;
|
| 379 |
+
|
| 380 |
+
// Global indices of participating variables in the bucket
|
| 381 |
+
std::vector<size_t> variable_indices;
|
| 382 |
+
|
| 383 |
+
// Future work handle for DDP communication hook
|
| 384 |
+
// If no hook is registered, a temporary vanilla allreduce hook is used.
|
| 385 |
+
c10::intrusive_ptr<at::ivalue::Future> future_work;
|
| 386 |
+
|
| 387 |
+
// If this bucket should expect a single sparse gradient
|
| 388 |
+
// If `true`, then this implies that `bucket.variables.size() == 1`.
|
| 389 |
+
bool expect_sparse_gradient = false;
|
| 390 |
+
|
| 391 |
+
// Sparse indices tensor
|
| 392 |
+
std::optional<at::Tensor> sparse_tensor_indices = std::nullopt;
|
| 393 |
+
|
| 394 |
+
// TODO(@pietern)
|
| 395 |
+
// Memory copies from gradient tensors into the bucket are potentially
|
| 396 |
+
// done on different CUDA streams. We record an event for every copy
|
| 397 |
+
// so that we can synchronize with them prior to kicking off the reduction.
|
| 398 |
+
// std::vector<at::cuda::CUDAEvent> events;
|
| 399 |
+
};
|
| 400 |
+
|
| 401 |
+
std::vector<Bucket> buckets_;
|
| 402 |
+
|
| 403 |
+
// A variable locator locates a particular variable in the reducer's buckets
|
| 404 |
+
struct VariableLocator {
|
| 405 |
+
// Index of the bucket containing the variable in the `buckets_` vector
|
| 406 |
+
size_t bucket_index;
|
| 407 |
+
// Index of the variable in the bucket, which may be used consistently
|
| 408 |
+
// across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`,
|
| 409 |
+
// `lengths`, `sizes_vec`, and `variable_indices` in `Bucket`
|
| 410 |
+
size_t intra_bucket_index;
|
| 411 |
+
|
| 412 |
+
VariableLocator() = default;
|
| 413 |
+
|
| 414 |
+
VariableLocator(size_t bucket_index_, size_t intra_bucket_index_)
|
| 415 |
+
: bucket_index(bucket_index_),
|
| 416 |
+
intra_bucket_index(intra_bucket_index_) {}
|
| 417 |
+
};
|
| 418 |
+
|
| 419 |
+
// Map the index of a variable to its location in the bucket structure.
|
| 420 |
+
std::vector<VariableLocator> variable_locators_;
|
| 421 |
+
|
| 422 |
+
// track the number of iterations to synchronize grads in training so far.
|
| 423 |
+
long num_iterations_;
|
| 424 |
+
// track distinct iteration of backward call. This is distinct from
|
| 425 |
+
// num_iterations_, for example in the case of multiple forward before
|
| 426 |
+
// backward.
|
| 427 |
+
long num_bwd_calls_;
|
| 428 |
+
// whether the first autograd hook for a distinct backward pass has been
|
| 429 |
+
// called.
|
| 430 |
+
bool first_autograd_hook_called_;
|
| 431 |
+
// track the number of buckets that have been ready for
|
| 432 |
+
// communication calls like allReduce or communication hooks.
|
| 433 |
+
int num_buckets_ready_;
|
| 434 |
+
|
| 435 |
+
// Timing information.
|
| 436 |
+
int64_t backward_compute_start_time_ = -1;
|
| 437 |
+
std::unique_ptr<Timer> timer_;
|
| 438 |
+
|
| 439 |
+
// We collect the relative timestamp of every gradient being ready
|
| 440 |
+
// when executing autograd. This can be used to derive a timeline of
|
| 441 |
+
// the point in time buckets were ready, or ideal bucket assignment/ordering.
|
| 442 |
+
std::vector<int64_t> backward_stats_;
|
| 443 |
+
|
| 444 |
+
bool should_collect_runtime_stats();
|
| 445 |
+
void record_forward_compute_start_time();
|
| 446 |
+
void record_backward_compute_start_time();
|
| 447 |
+
void record_backward_compute_end_time();
|
| 448 |
+
void record_backward_comm_start_time();
|
| 449 |
+
void record_backward_comm_end_time();
|
| 450 |
+
|
| 451 |
+
int get_ddp_runtime_logging_sample_rate();
|
| 452 |
+
int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate;
|
| 453 |
+
|
| 454 |
+
bool is_multi_device_module_ = false;
|
| 455 |
+
|
| 456 |
+
// Following variables are to help build dynamic bucket order
|
| 457 |
+
bool has_rebuilt_bucket_;
|
| 458 |
+
std::vector<at::Tensor> rebuilt_params_;
|
| 459 |
+
std::vector<int64_t> rebuilt_param_indices_;
|
| 460 |
+
const int64_t bucket_bytes_cap_;
|
| 461 |
+
|
| 462 |
+
#ifndef _WIN32
|
| 463 |
+
struct RpcContext {
|
| 464 |
+
using ContextPtr = torch::distributed::autograd::ContextPtr;
|
| 465 |
+
// The shared_ptr is to hold the context instance.
|
| 466 |
+
ContextPtr context_ptr_holder;
|
| 467 |
+
std::atomic<ContextPtr::element_type*> context_ptr{nullptr};
|
| 468 |
+
|
| 469 |
+
void set(ContextPtr&& new_context_ptr);
|
| 470 |
+
};
|
| 471 |
+
RpcContext rpc_context_;
|
| 472 |
+
#endif
|
| 473 |
+
|
| 474 |
+
// A struct containing work handle and tensor for allreduce scheduled in
|
| 475 |
+
// forward pass, if applicable.
|
| 476 |
+
struct ForwardPassAllreduceWork {
|
| 477 |
+
c10::intrusive_ptr<c10d::Work> workHandle;
|
| 478 |
+
at::Tensor resultTensor;
|
| 479 |
+
// whether we should divide by the initial world_size or the no. of
|
| 480 |
+
// remaining DDP ranks.
|
| 481 |
+
bool useStaticWorldSize;
|
| 482 |
+
};
|
| 483 |
+
|
| 484 |
+
// Handle for the currently scheduled allreduce in the forward pass, if
|
| 485 |
+
// applicable.
|
| 486 |
+
ForwardPassAllreduceWork forwardPassWorkHandle_;
|
| 487 |
+
|
| 488 |
+
// Division factor for reduction of gradients.
|
| 489 |
+
// Equal to the process group size, with an exception of handling uneven
|
| 490 |
+
// input.
|
| 491 |
+
int div_factor_;
|
| 492 |
+
|
| 493 |
+
bool static_graph_;
|
| 494 |
+
|
| 495 |
+
// Key: size_t (index), Value: the number of times that a variable's
|
| 496 |
+
// autograd_hook() should be triggered before marking this variable's grad as
|
| 497 |
+
// ready for communication. Map will not change after 1st iteration.
|
| 498 |
+
std::unordered_map<size_t, int> numGradHooksTriggeredMap_;
|
| 499 |
+
// Key: size_t (index), Value: the number of times that a variable's
|
| 500 |
+
// autograd_hook() are left to be triggered before marking this variable's
|
| 501 |
+
// grad as ready for communication. Map will change after 1st iteration to
|
| 502 |
+
// track a grad is ready for communication or not.
|
| 503 |
+
std::unordered_map<size_t, int> numGradHooksTriggeredMapPerIteration_;
|
| 504 |
+
|
| 505 |
+
private:
|
| 506 |
+
// reset counting for buckets before backward starts
|
| 507 |
+
void reset_bucket_counting();
|
| 508 |
+
// search unused parameters beore backward starts
|
| 509 |
+
void search_unused_parameters(
|
| 510 |
+
const std::vector<torch::autograd::Variable>& outputs);
|
| 511 |
+
void set_divide_factor();
|
| 512 |
+
// kick off all reduce for the ready bucket
|
| 513 |
+
void all_reduce_bucket(Bucket& bucket);
|
| 514 |
+
// kick off all reduce to local used map, it can help find global unused
|
| 515 |
+
// parameters
|
| 516 |
+
void all_reduce_local_used_map();
|
| 517 |
+
// initialize locally used parameter maps
|
| 518 |
+
void initialize_local_used_map();
|
| 519 |
+
// get current cuda stream
|
| 520 |
+
const c10::Stream get_current_stream();
|
| 521 |
+
bool dynamic_graph_find_unused();
|
| 522 |
+
bool static_graph_first_iteration();
|
| 523 |
+
bool static_graph_after_first_iteration();
|
| 524 |
+
|
| 525 |
+
// comm_hook_ is used to access the DDP communication hook if registered.
|
| 526 |
+
std::unique_ptr<CommHookInterface> comm_hook_;
|
| 527 |
+
|
| 528 |
+
// Sparse metadata contains the indices that will be used
|
| 529 |
+
// when calling into sparse allreduce.
|
| 530 |
+
// This is only used in the sparse allreduce collective calls
|
| 531 |
+
std::unique_ptr<std::map<std::string, at::Tensor>> sparse_metadata_;
|
| 532 |
+
|
| 533 |
+
// Debug level setting. It is parsed once when Reducer is constructed, and
|
| 534 |
+
// remains the same across a single invocation of DDP training.
|
| 535 |
+
DebugLevel ddp_debug_level_;
|
| 536 |
+
// Mapping of variable index to fully qualified name of model to notify users
|
| 537 |
+
// about errors when certain parameters do not get gradient.
|
| 538 |
+
std::unordered_map<size_t, std::string> param_names_;
|
| 539 |
+
// Variable indices stored sequentially in order of when the gradient is ready
|
| 540 |
+
// for the current backwards pass.
|
| 541 |
+
std::vector<int64_t> grad_ready_order_indices_;
|
| 542 |
+
// Bytes capacity of first bucket, can be configured by user
|
| 543 |
+
int64_t first_bucket_bytes_cap_;
|
| 544 |
+
// Per iteration set of parameter indices that have been marked ready.
|
| 545 |
+
std::unordered_set<size_t> perIterationReadyParams_;
|
| 546 |
+
// Retrieves parameter names that have not been marked as ready as part of
|
| 547 |
+
// previous iteration.
|
| 548 |
+
std::vector<std::string> getUnmarkedParamsForIteration();
|
| 549 |
+
// Retrieves parameter indices that have not been marked as ready as part of
|
| 550 |
+
// previous iteration.
|
| 551 |
+
std::vector<size_t> getUnmarkedParamIndicesForIteration();
|
| 552 |
+
// Raises appropriate error if mark_variable_ready is called on the same
|
| 553 |
+
// variable twice, which is unexpected.
|
| 554 |
+
void checkAndRaiseMarkedTwiceError(size_t curVariableIndex);
|
| 555 |
+
// Retrieves parameter corresponding to the given VariableIndex.
|
| 556 |
+
at::Tensor& get_param_from_index(size_t index);
|
| 557 |
+
|
| 558 |
+
// Cached bucket index to model parameter mapping. Populated after buckets
|
| 559 |
+
// are rebuilt after which this mapping is static.
|
| 560 |
+
mutable std::unordered_map<size_t, std::vector<at::Tensor>>
|
| 561 |
+
cached_variables_for_bucket_;
|
| 562 |
+
|
| 563 |
+
bool optim_in_backward_{false};
|
| 564 |
+
friend class Logger;
|
| 565 |
+
};
|
| 566 |
+
|
| 567 |
+
// This is equivalent to take_tensors but returns indices into the
|
| 568 |
+
// tensor list argument for bucket assignment. Also, it is aware
|
| 569 |
+
// of device placement and will not allow buckets to span devices.
|
| 570 |
+
// The index of tensors[i] assigned to bucket is tensor_indices[i],
|
| 571 |
+
// when tensor_indices is empty, the index of tensors[i] assigned to
|
| 572 |
+
// bucket is i.
|
| 573 |
+
TORCH_API std::tuple<std::vector<std::vector<size_t>>, std::vector<size_t>>
|
| 574 |
+
compute_bucket_assignment_by_size(
|
| 575 |
+
const std::vector<at::Tensor>& tensors,
|
| 576 |
+
const std::vector<size_t>& bucket_size,
|
| 577 |
+
const std::vector<bool>& expect_sparse_gradient = {},
|
| 578 |
+
const std::vector<int64_t>& tensor_indices = {},
|
| 579 |
+
const std::optional<std::weak_ptr<c10d::Logger>>& logger = {});
|
| 580 |
+
|
| 581 |
+
// Verify models across all processes are the same as model on rank 0 with
|
| 582 |
+
// respect to no. of params and matching dtype/size/layout.
|
| 583 |
+
TORCH_API void verify_params_across_processes(
|
| 584 |
+
const c10::intrusive_ptr<c10d::ProcessGroup>& process_group,
|
| 585 |
+
const std::vector<at::Tensor>& params,
|
| 586 |
+
const std::optional<std::weak_ptr<c10d::Logger>>& logger);
|
| 587 |
+
} // namespace c10d
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace rpc {
|
| 10 |
+
|
| 11 |
+
enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE };
|
| 12 |
+
|
| 13 |
+
// Python wrapper of an RRef shared_ptr that supports Python
|
| 14 |
+
// pickle and unpickle.
|
| 15 |
+
class PYBIND11_EXPORT PyRRef {
|
| 16 |
+
public:
|
| 17 |
+
// The first ctor can only be called while holding GIL. See its implementation
|
| 18 |
+
// for more explanations.
|
| 19 |
+
explicit PyRRef(const py::object& value, const py::object& type_hint);
|
| 20 |
+
explicit PyRRef(c10::intrusive_ptr<RRef> rref);
|
| 21 |
+
PyRRef(const PyRRef&) = default;
|
| 22 |
+
~PyRRef();
|
| 23 |
+
|
| 24 |
+
bool isOwner() const;
|
| 25 |
+
bool confirmedByOwner() const;
|
| 26 |
+
WorkerInfo owner() const;
|
| 27 |
+
std::string ownerName() const;
|
| 28 |
+
py::object toHere(
|
| 29 |
+
const float timeoutSeconds =
|
| 30 |
+
torch::distributed::rpc::kUnsetRpcTimeout) const;
|
| 31 |
+
py::object localValue() const;
|
| 32 |
+
std::string str() const;
|
| 33 |
+
py::tuple pickle() const;
|
| 34 |
+
static PyRRef unpickle(const py::tuple& t);
|
| 35 |
+
c10::IValue toIValue() const;
|
| 36 |
+
// Future that is associated with the creation of this RRef on the remote end.
|
| 37 |
+
// This is only used to get the future corresponding to the rref for profiling
|
| 38 |
+
// use cases.
|
| 39 |
+
c10::intrusive_ptr<JitFuture> getFuture() const;
|
| 40 |
+
// Keeps track of the future responsible for profiling owner creation
|
| 41 |
+
// acknowledgement
|
| 42 |
+
c10::intrusive_ptr<JitFuture> getProfilingFuture() const;
|
| 43 |
+
// Sets the future responsible for profiling owner creation acknowledgement.
|
| 44 |
+
// This future is set from python to be a future that returns when profiling
|
| 45 |
+
// callbacks have been run.
|
| 46 |
+
void setProfilingFuture(c10::intrusive_ptr<JitFuture> profilingFuture);
|
| 47 |
+
|
| 48 |
+
// create a proxy on this RRef, which can be used to launch RPC on the owner
|
| 49 |
+
// of this RRef to run functions on the object referenced by this RRef.
|
| 50 |
+
py::object createRRefProxy(
|
| 51 |
+
const RRefProxyType& mode,
|
| 52 |
+
float timeoutSeconds = rpc::kUnsetRpcTimeout) const;
|
| 53 |
+
|
| 54 |
+
// get the type of the data object referenced by this RRef. Timeout argument
|
| 55 |
+
// is only used in the first invocation of this function as an argument to the
|
| 56 |
+
// RPC to the owner node of the RRef.
|
| 57 |
+
py::object getRRefType(
|
| 58 |
+
float timeout = rpc::kUnsetRpcTimeout,
|
| 59 |
+
bool blocking = true);
|
| 60 |
+
|
| 61 |
+
// Run the backward pass with the RRef as the root.
|
| 62 |
+
void backward(int64_t autogradContextId, bool retainGraph);
|
| 63 |
+
|
| 64 |
+
// Helper static function to run backward on a given rref.
|
| 65 |
+
static void backward(
|
| 66 |
+
int64_t autogradContextId,
|
| 67 |
+
bool retainGraph,
|
| 68 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 69 |
+
|
| 70 |
+
// Specialization of backward if the rref is an OwnerRRef.
|
| 71 |
+
static void backwardOwnerRRef(
|
| 72 |
+
int64_t autogradContextId,
|
| 73 |
+
bool retainGraph,
|
| 74 |
+
IValue value);
|
| 75 |
+
|
| 76 |
+
private:
|
| 77 |
+
c10::intrusive_ptr<RRef> rref_;
|
| 78 |
+
std::optional<c10::intrusive_ptr<JitFuture>> profilingFuture_;
|
| 79 |
+
std::optional<py::object> type_;
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
} // namespace rpc
|
| 83 |
+
} // namespace distributed
|
| 84 |
+
} // namespace torch
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 8 |
+
|
| 9 |
+
#include <atomic>
|
| 10 |
+
#include <optional>
|
| 11 |
+
|
| 12 |
+
namespace torch::distributed::rpc {
|
| 13 |
+
|
| 14 |
+
namespace callback {
|
| 15 |
+
// It's the callback for RemoteCall.
|
| 16 |
+
void TORCH_API
|
| 17 |
+
confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId);
|
| 18 |
+
|
| 19 |
+
// It's the callback for finishing creating owner rref, it returned deletedRRef,
|
| 20 |
+
// so that the deletedRRef can be handled under GIL in python_functions.cpp if
|
| 21 |
+
// deletedRRef contains python object.
|
| 22 |
+
c10::intrusive_ptr<RRef> TORCH_API
|
| 23 |
+
finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId);
|
| 24 |
+
} // namespace callback
|
| 25 |
+
|
| 26 |
+
// Manages RRef lifetime and keeps track of RRef forks.
|
| 27 |
+
class TORCH_API RRefContext {
|
| 28 |
+
public:
|
| 29 |
+
static RRefContext& getInstance();
|
| 30 |
+
// NB: This method must be called before destructing RRefContext singleton.
|
| 31 |
+
// Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that
|
| 32 |
+
// hold py::object. The call-site is also responsible for resetting those
|
| 33 |
+
// shared_ptr objects with a GIL. See comments at delForkOfOwner() for more
|
| 34 |
+
// details.
|
| 35 |
+
static std::vector<c10::intrusive_ptr<RRef>> destroyInstance(
|
| 36 |
+
bool ignoreRRefLeak = true);
|
| 37 |
+
|
| 38 |
+
static void handleException(const JitFuture& jitFuture);
|
| 39 |
+
|
| 40 |
+
// handle exception without throw ::c10::Error again
|
| 41 |
+
static void handleExceptionSilent(const JitFuture& jitFuture);
|
| 42 |
+
|
| 43 |
+
RRefContext(const RRefContext&) = delete;
|
| 44 |
+
RRefContext(RRefContext&& other) = delete;
|
| 45 |
+
void operator=(const RRefContext&) = delete;
|
| 46 |
+
RRefContext& operator=(RRefContext&& other) = delete;
|
| 47 |
+
|
| 48 |
+
~RRefContext();
|
| 49 |
+
|
| 50 |
+
// get the worker id of the current worker
|
| 51 |
+
inline worker_id_t getWorkerId() const {
|
| 52 |
+
return agent_->getWorkerInfo().id_;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
// get the worker name of the current worker
|
| 56 |
+
inline const std::string& getWorkerName() const {
|
| 57 |
+
return agent_->getWorkerInfo().name_;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
// generate a globally unique ID
|
| 61 |
+
inline GloballyUniqueId genGloballyUniqueId() {
|
| 62 |
+
return GloballyUniqueId(getWorkerId(), nextLocalId_++);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
inline const std::shared_ptr<RpcAgent>& agent() const {
|
| 66 |
+
return agent_;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// create a ``UserRRef`` owned by the worker ``ownerId``
|
| 70 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 71 |
+
worker_id_t ownerId,
|
| 72 |
+
const TypePtr& type);
|
| 73 |
+
|
| 74 |
+
// Convert an RRefForkData into an RRef. This RRef could be user or owner.
|
| 75 |
+
// This RRef could have already existed before, or could be created in this
|
| 76 |
+
// method, we pass type here to validate or help the rref creation.
|
| 77 |
+
c10::intrusive_ptr<RRef> getOrCreateRRef(
|
| 78 |
+
const RRefForkData& rfd,
|
| 79 |
+
const TypePtr& type);
|
| 80 |
+
|
| 81 |
+
// Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new
|
| 82 |
+
// one. This function is called in two places:
|
| 83 |
+
// 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL``
|
| 84 |
+
// ``PYTHON_REMOTE_CALL``.
|
| 85 |
+
// 2. when unpickling ``OwnerRRef``.
|
| 86 |
+
// What's common in these two cases are, 1) the RRefId is already generated
|
| 87 |
+
// 2) the TypePtr is presented. So it can always create the ``OwnerRRef`` if
|
| 88 |
+
// it is not yet available.
|
| 89 |
+
c10::intrusive_ptr<OwnerRRef> getOrCreateOwnerRRef(
|
| 90 |
+
const RRefId& rrefId,
|
| 91 |
+
const TypePtr& type);
|
| 92 |
+
|
| 93 |
+
// Create an empty owner rref of type.
|
| 94 |
+
// This method is called to first time generate an ``OwnerRRef``, e.g.,
|
| 95 |
+
// 1) ``rpc.RRef(obj)``
|
| 96 |
+
// 2) create the ``OwnerRRef`` on `rpc.remote()` caller side.
|
| 97 |
+
// What's common in these two cases are, 1) the RRefId hasn't been generated
|
| 98 |
+
// 2) the TypePtr is presented.
|
| 99 |
+
c10::intrusive_ptr<OwnerRRef> createOwnerRRef(const TypePtr& type);
|
| 100 |
+
|
| 101 |
+
// Returns a Future of the OwnerRRef, which will be marked completed when
|
| 102 |
+
// ``OwnerRRef`` is created. This method is used when the TypePtr is not
|
| 103 |
+
// available, e.g., when processing to_here(). The forceCreated flag can be
|
| 104 |
+
// used to ensure that the rref is created on the owner, otherwise throw in
|
| 105 |
+
// cases where the user of this API expects this to return a completed future.
|
| 106 |
+
// Note that the return value is a intrusive_ptr to a c10::ivalue::Future that
|
| 107 |
+
// holds the RRef.
|
| 108 |
+
c10::intrusive_ptr<JitFuture> getOwnerRRef(
|
| 109 |
+
const RRefId& rrefId,
|
| 110 |
+
bool forceCreated = false);
|
| 111 |
+
|
| 112 |
+
// Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when
|
| 113 |
+
// making a remote call to self, which as for now, still goes through serde
|
| 114 |
+
// and invokes request callback. In this case, the OwnerRRef has already been
|
| 115 |
+
// created on the send side, and we need to pass it to the receive side,
|
| 116 |
+
// instead of creating a new OwnerRRef. This is done by adding the OwnerRRef
|
| 117 |
+
// into owners_. However, that alone is not enough, as it could be deleted
|
| 118 |
+
// when all UserRRef die, which would then remove the OwnerRRef from owners_
|
| 119 |
+
// and this could happen before the self remote call finishes. To prevent
|
| 120 |
+
// that, this API adds the RRefId as a ForkId, which will then delete the
|
| 121 |
+
// ForkId when the self remote is done.
|
| 122 |
+
void addSelfAsFork(c10::intrusive_ptr<OwnerRRef>& rref);
|
| 123 |
+
|
| 124 |
+
// Register a fork of the ``OwnerRRef``, and inserts a intrusive_ptr of the
|
| 125 |
+
// ``OwnerRRef`` in a map to keep it alive.
|
| 126 |
+
void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId);
|
| 127 |
+
// Performs the same function as addForkOfOwner but ignores duplicate
|
| 128 |
+
// requests. This idempotent function is used with RREF_FORK_REQUEST calls,
|
| 129 |
+
// whereas all other message types use the non-idempotent variant.
|
| 130 |
+
void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId);
|
| 131 |
+
// Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion on the
|
| 132 |
+
// IValue or py::object. For the later, this method will acquire GIL.
|
| 133 |
+
// NB: If this fork deletion triggered deleting OwnerRRef, this method will
|
| 134 |
+
// return a shared_ptr to the OwnerRRef, which is likely to be the last
|
| 135 |
+
// shared_ptr instance for it. Therefore, deleting this shared_ptr<OwnerRRef>
|
| 136 |
+
// will also trigger deleting the object it points to. If OwnerRRef holds a
|
| 137 |
+
// py::object, deleting it require GIL. The call site should guarded it with
|
| 138 |
+
// a GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally
|
| 139 |
+
// left out of this function to avoid creating dependency on pybind.
|
| 140 |
+
c10::intrusive_ptr<RRef> delForkOfOwner(
|
| 141 |
+
const RRefId& rrefId,
|
| 142 |
+
const ForkId& forkId);
|
| 143 |
+
|
| 144 |
+
// Invoked when pickling an RRef to setup child/fork properly
|
| 145 |
+
RRefForkData prepareChildFork(const c10::intrusive_ptr<RRef>& rref);
|
| 146 |
+
// Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and
|
| 147 |
+
// send RREF_CHILD_ACCEPT to the parent.
|
| 148 |
+
// NB: forkId is necessary here as the rref could be an OwnerRRef
|
| 149 |
+
void notifyOwnerAndParentOfFork(
|
| 150 |
+
const ForkId& forkId,
|
| 151 |
+
worker_id_t parent,
|
| 152 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 153 |
+
|
| 154 |
+
// When a UserRRef is forked to another worker (user or owner), it is added
|
| 155 |
+
// into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT
|
| 156 |
+
// from the child.
|
| 157 |
+
// NB: This is necessary for both user and owner child. As we do not have FIFO
|
| 158 |
+
// communication between workers, we need this strategy to make sure that all
|
| 159 |
+
// previously submitted rpc/remote calls are acked before sending out the
|
| 160 |
+
// RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too
|
| 161 |
+
// soon.
|
| 162 |
+
void addPendingChild(
|
| 163 |
+
const ForkId& forkId,
|
| 164 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 165 |
+
void delPendingChild(const ForkId& forkId);
|
| 166 |
+
|
| 167 |
+
// When a UserRRef is created, it is added into pendingUsers_ to be held alive
|
| 168 |
+
// until it receives RREF_USER_ACCEPT from the owner.
|
| 169 |
+
void addPendingUser(
|
| 170 |
+
const ForkId& forkId,
|
| 171 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 172 |
+
void delPendingUser(const ForkId& forkId);
|
| 173 |
+
void addConfirmedUser(
|
| 174 |
+
const ForkId& forkId,
|
| 175 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 176 |
+
|
| 177 |
+
// Retrieve a pending user given the fork ID. Throws if the user has already
|
| 178 |
+
// been confirmed (i.e. is no longer in the pendingUsers_ map).
|
| 179 |
+
c10::intrusive_ptr<RRef> getPendingUser(const ForkId& forkId);
|
| 180 |
+
|
| 181 |
+
// Start recording new pending UserRRefs. All pending UserRRefs introduced
|
| 182 |
+
// after this point will be put into the thread_local userTable_, which will
|
| 183 |
+
// then be consumed and cleared in waitForThreadLocalPendingRRefs().
|
| 184 |
+
void recordThreadLocalPendingRRefs();
|
| 185 |
+
// End recording new pending UserRRefs, and clear the thread_local userTable_.
|
| 186 |
+
// Returns a Future which will be marked as completed when all pending
|
| 187 |
+
// UserRRefs in the current userTable_ are confirmed by their owners. The bool
|
| 188 |
+
// value in the Future is unused.
|
| 189 |
+
// This method is useful to make sure RRefs in user function arguments are
|
| 190 |
+
// confirmed before launching user code.
|
| 191 |
+
// NB: Callers of this method does not need to keep the returned Future alive,
|
| 192 |
+
// because this Future is already captured in callbacks of the
|
| 193 |
+
// PendingUserState. If there is no pending UserRRefs, this method returns a
|
| 194 |
+
// completed future.
|
| 195 |
+
c10::intrusive_ptr<JitFuture> waitForThreadLocalPendingRRefs();
|
| 196 |
+
// Only call this function when there are errors during a recording session,
|
| 197 |
+
// and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked
|
| 198 |
+
// properly.
|
| 199 |
+
// TODO: make this a context guard
|
| 200 |
+
void clearRecordedPendingRRefsOnError();
|
| 201 |
+
|
| 202 |
+
void delUser(
|
| 203 |
+
const worker_id_t owner,
|
| 204 |
+
const RRefId& rrefId,
|
| 205 |
+
const ForkId& forkId);
|
| 206 |
+
void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis);
|
| 207 |
+
|
| 208 |
+
std::unordered_map<std::string, std::string> getDebugInfo();
|
| 209 |
+
|
| 210 |
+
private:
|
| 211 |
+
struct PendingUserState {
|
| 212 |
+
PendingUserState(c10::intrusive_ptr<RRef> rref)
|
| 213 |
+
: rref_(std::move(rref)),
|
| 214 |
+
confirmationFuture_(c10::make_intrusive<JitFuture>(BoolType::get())) {
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
inline void confirm() {
|
| 218 |
+
c10::static_intrusive_pointer_cast<UserRRef>(rref_)->confirm();
|
| 219 |
+
confirmationFuture_->markCompleted();
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
c10::intrusive_ptr<RRef> rref_;
|
| 223 |
+
// Use Future.wait() and Future.markCompleted() to block and unblock user
|
| 224 |
+
// functions. The bool value wrapped by the future_ is not used.
|
| 225 |
+
c10::intrusive_ptr<JitFuture> confirmationFuture_;
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
RRefContext(std::shared_ptr<RpcAgent>);
|
| 229 |
+
|
| 230 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 231 |
+
worker_id_t ownerId,
|
| 232 |
+
const RRefId& rrefId,
|
| 233 |
+
const ForkId& forkId,
|
| 234 |
+
const TypePtr& type);
|
| 235 |
+
|
| 236 |
+
void finishForkRequest(const ForkId& forkId, worker_id_t parent);
|
| 237 |
+
|
| 238 |
+
// If there is any leak on any RRef, this method will throw an error.
|
| 239 |
+
void checkRRefLeaks(bool ignoreRRefLeak);
|
| 240 |
+
|
| 241 |
+
static std::atomic<local_id_t> nextLocalId_;
|
| 242 |
+
|
| 243 |
+
const std::shared_ptr<RpcAgent> agent_;
|
| 244 |
+
mutable std::mutex mutex_;
|
| 245 |
+
// Keep OwnerRRefs alive until there is no living UserRRefs.
|
| 246 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<RRef>, RRefId::Hash> owners_;
|
| 247 |
+
// A map to track OwnerRRefs that are requested but not yet created. This can
|
| 248 |
+
// happen if the to_here() message is processed on the owner before the
|
| 249 |
+
// corresponding creator rpc.remote() message. If this happens, instead of
|
| 250 |
+
// to_here() RPC thread to block waiting for the OwnerRRef creation, the
|
| 251 |
+
// RRefContext returns a Future, so that the RPC request processing logic can
|
| 252 |
+
// attach subsequent code as a callback to that Future.
|
| 253 |
+
// NB: the OwnerRRefs in this map must be cleared when the corresponding
|
| 254 |
+
// OwnerRRef is created. Note that the values in this map are intrusive_ptrs
|
| 255 |
+
// to c10::ivalue::Future that will be marked completed with the owner RRef.
|
| 256 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<JitFuture>, RRefId::Hash>
|
| 257 |
+
pendingOwners_;
|
| 258 |
+
// Tracks known living UserRRefs of an OwnerRRef
|
| 259 |
+
std::unordered_map<
|
| 260 |
+
RRefId,
|
| 261 |
+
std::unordered_set<ForkId, ForkId::Hash>,
|
| 262 |
+
RRefId::Hash>
|
| 263 |
+
forks_;
|
| 264 |
+
|
| 265 |
+
// This cond var is used by deleteAllUsers(), a event notification is sent if
|
| 266 |
+
// number of pending UserRRef or UserRRef children is reduced, or
|
| 267 |
+
// number of owned OwnerRRef is reduced.
|
| 268 |
+
std::condition_variable deleteAllUsersCV_;
|
| 269 |
+
// The follow 3 maps keep UserRRefs alive by holding a intrusive_ptr to the
|
| 270 |
+
// RRef instances. A UserRRef must be added into this map if any of the
|
| 271 |
+
// following two conditions is true:
|
| 272 |
+
//
|
| 273 |
+
// (1) A UserRRef has not been accepted by owner yet.
|
| 274 |
+
//
|
| 275 |
+
// It can be used or shared, but cannot be deleted, and hence kept alive
|
| 276 |
+
// in this map. A message of type RREF_USER_ACCEPT will move the
|
| 277 |
+
// corresponding RRef from pendingUsers_ map to confirmedUsers_ map.
|
| 278 |
+
std::unordered_map<ForkId, std::shared_ptr<PendingUserState>, ForkId::Hash>
|
| 279 |
+
pendingUsers_;
|
| 280 |
+
// UserRRefs are added into this map when it is confirmed by the owner.
|
| 281 |
+
// When destroying RRefContext this map helps to find local UserRRefs
|
| 282 |
+
// and send delete messages if they are still not deleted by Python
|
| 283 |
+
// garbage collection.
|
| 284 |
+
std::unordered_map<ForkId, c10::weak_intrusive_ptr<RRef>, ForkId::Hash>
|
| 285 |
+
confirmedUsers_;
|
| 286 |
+
|
| 287 |
+
// (2) A UserRRef has forked a child UserRRef which has not been accepted by
|
| 288 |
+
// the owner yet.
|
| 289 |
+
//
|
| 290 |
+
// In this case, this UserRRef cannot send out RREF_USER_DELETE message,
|
| 291 |
+
// as it could potentially trigger the OwnerRRef been deleted before the
|
| 292 |
+
// owner learns about the forked child.
|
| 293 |
+
std::unordered_map<ForkId, c10::intrusive_ptr<RRef>, ForkId::Hash>
|
| 294 |
+
pendingChildren_;
|
| 295 |
+
|
| 296 |
+
// The RRef context performs its operations through async RPC requests, in
|
| 297 |
+
// order to not block the user code. Therefore the RRef context's state may be
|
| 298 |
+
// lagging a bit behind what it is intended to be, while it waits for these
|
| 299 |
+
// requests to complete. To allow syncing when needed, we store the count of
|
| 300 |
+
// these pending requests, so that users can wait for it to reach zero.
|
| 301 |
+
std::atomic<int64_t> numPendingFutures_{0};
|
| 302 |
+
|
| 303 |
+
std::mutex destroyedMutex_;
|
| 304 |
+
bool destroyed_{false};
|
| 305 |
+
|
| 306 |
+
// Thread local states to keep UserRRefs deserialized from user function
|
| 307 |
+
// arguments.
|
| 308 |
+
static thread_local std::vector<std::shared_ptr<PendingUserState>> userTable_;
|
| 309 |
+
// A flag indicating whether subsequently created UserRRefs should be added to
|
| 310 |
+
// the thread_local userTable_. The flag is set to true before serializing
|
| 311 |
+
// RPC arguments and then set to false before running the corresponding
|
| 312 |
+
// user code. See addPendingUser and delPendingUser for more details.
|
| 313 |
+
// NB: The reason for having this flag is because addPendingUser are called in
|
| 314 |
+
// two cases, and we only want to track the 2nd case.
|
| 315 |
+
// (1) RRef as the return value: when calling rpc.remote, the UserRRef on the
|
| 316 |
+
// caller side is added to the context using addPendingUser.
|
| 317 |
+
// (2) RRef as an argument: When running an RPC using RRefs as arguments, the
|
| 318 |
+
// RRef is forwarded to the callee as new UserRRefs (if the callee is not
|
| 319 |
+
// the owner). In this case, we block running the user function until all
|
| 320 |
+
// UserRRefs are confirmed by the owner.
|
| 321 |
+
// This contract gurantees that no UserRRefs can be used remotely without
|
| 322 |
+
// confirmation. Note that, however, the UserRRef created by rpc.remote can
|
| 323 |
+
// still be passed to local functions as arguments and used there. This is by
|
| 324 |
+
// design, because this feature is especially useful when, say a master node
|
| 325 |
+
// creates multiple UserRRefs in a loop and then shares them with other nodes.
|
| 326 |
+
// Blocking every iteration in the loop until RRefs are confirmed will slow
|
| 327 |
+
// this down. This nuance on UserRRef can be interpreted as we only make
|
| 328 |
+
// exceptions for UserRRef creators. And using the UserRRef on its creator
|
| 329 |
+
// without confirmation is OK, because the creator would either call to_here
|
| 330 |
+
// or forward the UserRRef, and both would then require confirmations from the
|
| 331 |
+
// owner.
|
| 332 |
+
static thread_local bool recording_;
|
| 333 |
+
};
|
| 334 |
+
|
| 335 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/jit_type.h>
|
| 4 |
+
#include <ATen/core/rref_interface.h>
|
| 5 |
+
#include <c10/core/Event.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 8 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
|
| 11 |
+
#include <atomic>
|
| 12 |
+
|
| 13 |
+
namespace torch::distributed::rpc {
|
| 14 |
+
|
| 15 |
+
class RRef;
|
| 16 |
+
class RRefContext;
|
| 17 |
+
class UserRRef;
|
| 18 |
+
|
| 19 |
+
constexpr int OWNER_IDX = 0; // index of ownerId in the tuple
|
| 20 |
+
constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple
|
| 21 |
+
constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple
|
| 22 |
+
constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple
|
| 23 |
+
constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple
|
| 24 |
+
constexpr int PARENT_IDX = 5; // index of parent in the tuple
|
| 25 |
+
constexpr int TYPE_IDX = 6; // index of parent in the tuple
|
| 26 |
+
|
| 27 |
+
// NB: if more fields are added, make sure this field is also bumped
|
| 28 |
+
constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple
|
| 29 |
+
|
| 30 |
+
// Represents fork of an RRef to be sent over the wire.
|
| 31 |
+
struct TORCH_API RRefForkData {
|
| 32 |
+
const worker_id_t ownerId_;
|
| 33 |
+
const RRefId rrefId_;
|
| 34 |
+
const ForkId forkId_;
|
| 35 |
+
const worker_id_t parent_;
|
| 36 |
+
const std::string typeStr_;
|
| 37 |
+
|
| 38 |
+
RRefForkData(
|
| 39 |
+
worker_id_t ownerId,
|
| 40 |
+
const RRefId& rrefId,
|
| 41 |
+
const ForkId& forkId,
|
| 42 |
+
worker_id_t parent,
|
| 43 |
+
std::string typeStr);
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
// Note [RRef Protocol]
|
| 47 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 48 |
+
//
|
| 49 |
+
// [Background]
|
| 50 |
+
//
|
| 51 |
+
// RRef stands for Remote REFerence. Each RRef is owned by a single worker
|
| 52 |
+
// (i.e., owner) and can be used by multiple users. The owner stores the real
|
| 53 |
+
// data referenced by its RRefs. RRef needs to support fast and scalable RPC.
|
| 54 |
+
// Hence, in the design, we avoid using a single global master to keep RRef
|
| 55 |
+
// states, instead owners will keep track of the global reference counts
|
| 56 |
+
// for its RRefs. Every RRef can be uniquely identified by a global RRefId,
|
| 57 |
+
// which is assigned at the time it is first created either on a user or on the
|
| 58 |
+
// owner.
|
| 59 |
+
//
|
| 60 |
+
// On the owner worker, there is only one OwnerRRef instance, which contains the
|
| 61 |
+
// real data, while on user workers, there can be as many UserRRefs as
|
| 62 |
+
// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef
|
| 63 |
+
// should retrieve the unique OwnerRRef instance using the globally unique
|
| 64 |
+
// RRefId. //A UserRRef will be created when it is used as an argument or return
|
| 65 |
+
// value in dist.rpc or dist.remote call, but RRef forking and reference
|
| 66 |
+
// counting (RC) are completely transparent to applications. Every UserRRef will
|
| 67 |
+
// also have its globally unique ForkId.
|
| 68 |
+
//
|
| 69 |
+
// [Assumptions]
|
| 70 |
+
//
|
| 71 |
+
// 1. Transient Network Failures
|
| 72 |
+
//
|
| 73 |
+
// TODO: current RRef implementation does not tolerate failures
|
| 74 |
+
//
|
| 75 |
+
// The RRef design handles transient network failures by retrying
|
| 76 |
+
// messages. Node crashes or permanent network partition is beyond the scope.
|
| 77 |
+
// When those incidents occur, the application may take down all workers, revert
|
| 78 |
+
// to the previous checkpoint, and resume training.
|
| 79 |
+
//
|
| 80 |
+
// 2. Non-idempotent UDFs
|
| 81 |
+
//
|
| 82 |
+
// We assume UDFs are not idempotent and therefore cannot be retried. However,
|
| 83 |
+
// internal RRef control messages are idempotent and retried upon message
|
| 84 |
+
// failure.
|
| 85 |
+
//
|
| 86 |
+
// TODO: RRef internal messages are not yet idempotent
|
| 87 |
+
//
|
| 88 |
+
// 3. Out of Order Message Delivery
|
| 89 |
+
//
|
| 90 |
+
// We do not assume message delivery order between any pair of nodes, because
|
| 91 |
+
// both sender and receiver are using multiple threads. There is no guarantee on
|
| 92 |
+
// which message will be processed first.
|
| 93 |
+
//
|
| 94 |
+
// [RRef Lifetime]
|
| 95 |
+
//
|
| 96 |
+
// The goal of the protocol is to delete an OwnerRRef at an appropriate time.
|
| 97 |
+
// The right time to delete an OwnerRRef is when there are no living UserRRefs
|
| 98 |
+
// and Python GC also agrees to delete the OwnerRRef instance on the owner. The
|
| 99 |
+
// tricky part is to determine if there are any living UserRRefs.
|
| 100 |
+
//
|
| 101 |
+
// A user can get a UserRRef in three situations:
|
| 102 |
+
//
|
| 103 |
+
// (1). Receiving a UserRRef from the owner.
|
| 104 |
+
// (2). Receiving a UserRRef from another user.
|
| 105 |
+
// (3). Creating a new UserRRef owned by another worker.
|
| 106 |
+
//
|
| 107 |
+
// (1) is the simplest case where the owner initiates the fork, and hence it can
|
| 108 |
+
// easily increment local RC. The only requirement is that any UserRRef must
|
| 109 |
+
// notify the owner before destruction. Hence, we need the first guarantee:
|
| 110 |
+
//
|
| 111 |
+
// G1. The owner will be notified when any UserRRef is deleted.
|
| 112 |
+
//
|
| 113 |
+
// As messages might come delayed or out-of-order, we need one more guarantee to
|
| 114 |
+
// make sure the delete message is not sent out too soon. Let us first introduce
|
| 115 |
+
// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef
|
| 116 |
+
// on A the parent RRef and the RRef on B the child RRef.
|
| 117 |
+
//
|
| 118 |
+
// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the
|
| 119 |
+
// owner.
|
| 120 |
+
//
|
| 121 |
+
// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply
|
| 122 |
+
// means that the user will not send out the delete message until all previous
|
| 123 |
+
// messages are ACKed. Note that ACKed does not mean the owner finishes
|
| 124 |
+
// executing the function, instead, it only means the owner has retrieved its
|
| 125 |
+
// local OwnerRRef and about to pass it to the function, which is sufficient to
|
| 126 |
+
// keep the OwnerRRef alive even if the delete message from the user arrives at
|
| 127 |
+
// the owner before the function finishes execution.
|
| 128 |
+
//
|
| 129 |
+
// With (2) and (3), it is possible that the owner only partially knows the RRef
|
| 130 |
+
// fork graph or not even knowing it at all. For example, the RRef could be
|
| 131 |
+
// constructed on a user, and before the owner receives the RPC call, the
|
| 132 |
+
// creator user might have already shared the RRef with other users, and those
|
| 133 |
+
// users could further share the RRef. One invariant is that the fork graph of
|
| 134 |
+
// any RRef is always a tree rooted at the owner, because forking an RRef always
|
| 135 |
+
// creates a new RRef instance, and hence every RRef has a single parent. One
|
| 136 |
+
// nasty detail is that when an RRef is created on a user, technically the owner
|
| 137 |
+
// is not its parent but we still consider it that way and it does not break the
|
| 138 |
+
// argument below.
|
| 139 |
+
//
|
| 140 |
+
// The owner's view on any node (fork) in the tree has three stages:
|
| 141 |
+
//
|
| 142 |
+
// 1) unknown -> 2) known -> 3) deleted.
|
| 143 |
+
//
|
| 144 |
+
// The owner's view on the entire tree keeps changing. The owner deletes its
|
| 145 |
+
// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when
|
| 146 |
+
// OwnerRRef is deleted, all UserRRefs could be either indeed deleted or
|
| 147 |
+
// unknown. The dangerous case is when some forks are unknown and others are
|
| 148 |
+
// deleted.
|
| 149 |
+
//
|
| 150 |
+
// G2 trivially guarantees that no parent UserRRef Y can be deleted before the
|
| 151 |
+
// owner knows all of Y's children UserRRefs.
|
| 152 |
+
//
|
| 153 |
+
// However, it is possible that the child UserRRef Z may be deleted before the
|
| 154 |
+
// owner knows its parent Y. More specifically, this can happen when all of Z's
|
| 155 |
+
// messages are processed by the owner before all messages from Y, including the
|
| 156 |
+
// delete message. Nevertheless, this does not cause any problem. Because, at
|
| 157 |
+
// least one of Y's ancestor will be alive, and it will prevent the owner from
|
| 158 |
+
// deleting the OwnerRRef. Consider the following example: (NB: this scenario
|
| 159 |
+
// will no longer be relevant when we block UDF until all RRefs are confirmed by
|
| 160 |
+
// the owner)
|
| 161 |
+
//
|
| 162 |
+
// OwnerRRef -> A -> Y -> Z
|
| 163 |
+
//
|
| 164 |
+
// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted
|
| 165 |
+
// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as
|
| 166 |
+
// the owner directly forks the RRef to A. A won't die before the owner knows Y.
|
| 167 |
+
//
|
| 168 |
+
// Things get a little trickier if the RRef is created on a user:
|
| 169 |
+
//
|
| 170 |
+
// OwnerRRef
|
| 171 |
+
// ^
|
| 172 |
+
// |
|
| 173 |
+
// A -> Y -> Z
|
| 174 |
+
//
|
| 175 |
+
// If Z calls to_here on the UserRRef, the owner at least knows A when Z is
|
| 176 |
+
// deleted, because otherwise to_here wouldn't finish. If Z does not call
|
| 177 |
+
// to_here, it is possible that the owner receives all messages from Z before
|
| 178 |
+
// any message from A and Y. In this case, as the real data of the OwnerRRef has
|
| 179 |
+
// not been created yet, there is nothing to be deleted either. It is the same
|
| 180 |
+
// as Z does not exist at all. Hence, it's still OK.
|
| 181 |
+
//
|
| 182 |
+
// See #26759 for more details and discussions.
|
| 183 |
+
//
|
| 184 |
+
// TODO: make RRef an IValue, and edit createStackForSchema accordingly
|
| 185 |
+
// TODO: make RRef system messages idempotent and retry on failures.
|
| 186 |
+
//
|
| 187 |
+
// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``.
// Each ``RRef`` has a globally unique ``RRefId``.
class TORCH_API RRef : public RRefInterface {
 public:
  // RRef is made NOT copyable and NOT movable to prevent messing up the
  // distributed reference counting protocol described in the file header.
  explicit RRef(const RRef& other) = delete;
  explicit RRef(RRef&& other) = delete;
  // NOTE: declaring the (deleted) move-assignment operator also suppresses
  // the implicitly-generated copy-assignment operator.
  RRef& operator=(RRef&& other) = delete;

  ~RRef() override = default;

  // Returns the worker id of the owner.
  inline worker_id_t owner() const override {
    return ownerId_;
  }

  // Returns the worker name of the owner, resolved through the current
  // RpcAgent. Requires a current RpcAgent to be set.
  inline std::string ownerName() const override {
    return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_;
  }

  // Returns the worker info of the owner, resolved through the current
  // RpcAgent.
  inline WorkerInfo ownerWorkerInfo() const {
    return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_);
  }

  // Returns the globally unique RRefId of this RRef.
  inline const RRefId& rrefId() const {
    return rrefId_;
  }

  // True when this RRef holds a Python object (i.e. type_ is PyObjectType).
  inline bool isPyObj() const {
    return type_ == PyObjectType::get();
  }

  // Returns the JIT type of the value this RRef refers to.
  inline const TypePtr type() const override {
    return type_;
  }

  // Save the future corresponding to the creation of this RRef on a remote
  // node. Note that this is only set when processing requests invoked with
  // rpc.remote. This is only used to get the future corresponding to the rref
  // for profiling use cases.
  inline void registerOwnerCreationFuture(c10::intrusive_ptr<JitFuture> fut) {
    ownerCreationFuture_ = std::move(fut);
  }

  // Get the future corresponding to the creation of this rref.
  inline c10::intrusive_ptr<JitFuture> getOwnerCreationFuture() const {
    return ownerCreationFuture_;
  }

  // Check if creation of this RRef on the owner node has timed out.
  inline bool getTimedOut() const {
    return timedOut_.load();
  }

  // Dispatches an error to the correct handler based on its RPCErrorType.
  void handleError(RPCErrorType errorType, const JitFuture& JitFuture);

  // Send delete UserRRef request to Owner,
  // if the request hasn't been sent yet.
  // There are 2 cases to call it,
  // 1, Python GC decides end of UserRRef lifetime, calling destructor.
  // 2, RPC module graceful shutdown calls it on all UserRRefs tracked
  //    in the RRefContext.
  // No-op in the base class; overridden by UserRRef.
  virtual void tryDel() {}

 protected:
  // Indicates that the creation of this RRef on the owner node has timed out.
  inline void setTimedOut() {
    timedOut_ = true;
  }
  friend class RRefContext;

  RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type);

  // Serializes the information needed to reconstruct this RRef on a peer
  // (fork it to another worker).
  virtual RRefForkData fork() const;

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const worker_id_t ownerId_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const RRefId rrefId_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::atomic<bool> timedOut_{false};

  // type field to denote the type of the element that the RRef is holding
  // it could be any TypePtr that JIT supports, including PyObjectType
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const TypePtr type_;
  // Future corresponding to request to create RRef on remote node.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  c10::intrusive_ptr<JitFuture> ownerCreationFuture_;
};
|
| 281 |
+
|
| 282 |
+
// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user
// also has a globally unique ``ForkId`` to identify this user. ``UserRRef``
// never owns the real value; the only way to get the value of the ``RRef`` is
// to call ``to_here()`` and get a copy.
class TORCH_API UserRRef final : public RRef {
 public:
  // Not copyable or movable; see the note on RRef.
  UserRRef(const UserRRef& other) = delete;
  UserRRef(UserRRef&& other) = delete;
  UserRRef& operator=(const UserRRef& other) = delete;
  UserRRef& operator=(UserRRef&& other) = delete;

  UserRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      const ForkId& forkId,
      TypePtr type);

  // A UserRRef is never the owner.
  inline bool isOwner() const override {
    return false;
  }

  // True once the owner has acknowledged this fork (see confirm()).
  inline bool confirmedByOwner() const override {
    return confirmedByOwner_;
  }

  // Returns the globally unique ForkId of this RRef.
  const ForkId& forkId() const;

  // Get a copy of the value from the ``OwnerRRef``. If the value is not ready
  // yet, this call will block.
  IValue toHere(
      const float timeoutSeconds =
          torch::distributed::rpc::kUnsetRpcTimeout) const;

  // Sends the delete-UserRRef request to the owner if not already sent; see
  // the comment on RRef::tryDel() for the two call sites.
  void tryDel() override;

  // Will be called when refcount reaches 0.
  // Upon destruction, this ``UserRRef`` will tell the owner to deref.
  void release_resources() override;

  // Will be called when both refcount and weakcount reach 0. See
  // https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204
  // This is called on destructing the wrapping intrusive_ptr_target instance
  // and its data members.
  ~UserRRef() override;

 private:
  friend class RRefContext;

  RRefForkData fork() const override;
  // Marks this fork as acknowledged by the owner.
  inline void confirm() {
    confirmedByOwner_ = true;
  }

  const ForkId forkId_;

  // Indicates if this user has sent the delete message to its owner.
  // Note, thread safety is needed because the delete message could be sent by
  // either the destructor called by Python garbage collection or RRefContext
  // proactive cleanup on RPC graceful shutdown.
  std::mutex deletedOnOwnerMutex_;
  bool deletedOnOwner_{false};
  // Indicating whether this UserRRef has been confirmed by its owner.
  std::atomic<bool> confirmedByOwner_;
};
|
| 347 |
+
|
| 348 |
+
// Keep the template only on the derived class because ``RRefContext`` needs to
// erase the type on ``RRef`` and keep them in one map.
class TORCH_API OwnerRRef final : public RRef {
 public:
  // Not copyable or movable; see the note on RRef.
  OwnerRRef(const OwnerRRef& other) = delete;
  OwnerRRef(OwnerRRef&& other) = delete;
  OwnerRRef& operator=(const OwnerRRef& other) = delete;
  OwnerRRef& operator=(OwnerRRef&& other) = delete;

  // Constructs an OwnerRRef whose value is not yet available.
  OwnerRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      TypePtr type,
      std::vector<c10::Device> devices);

  // Constructs an OwnerRRef, optionally with the value already set.
  OwnerRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      TypePtr type,
      std::optional<IValue> value,
      std::vector<c10::Device> devices);

  // An OwnerRRef is always the owner.
  inline bool isOwner() const override {
    return true;
  }

  // OwnerRRef is always confirmed, while UserRRef is only confirmed when the
  // owner knows about it.
  inline bool confirmedByOwner() const override {
    return true;
  }

  // Get a constant reference of the real value. This method will block if the
  // value is not ready. This method does not need GIL as it does not create
  // any new py::object. It will throw if there is an error.
  const IValue& getValue() const;

  // Set the value of this ``OwnerRRef``. This method does not need GIL as it
  // does not create any new py::object.
  void setValue(IValue&& value);
  // Sets the value of this ``OwnerRRef`` to contain an exception.
  void setError(std::exception_ptr eptr);

  // Has a value or error been set?
  bool hasValue() const;
  // Gets a future that is satisfied when the value or error is set.
  c10::intrusive_ptr<JitFuture> getFuture();

 private:
  friend class RRefContext;

  // Completed when setValue()/setError() is called; backs getFuture().
  c10::intrusive_ptr<JitFuture> future_;
};
|
| 401 |
+
|
| 402 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref);
|
| 403 |
+
|
| 404 |
+
// Helper function that casts from c10::RRefInterface to OwnerRRef
|
| 405 |
+
inline TORCH_API c10::intrusive_ptr<OwnerRRef> fromRRefInterface(
|
| 406 |
+
const c10::intrusive_ptr<c10::RRefInterface>& rrefInterface) {
|
| 407 |
+
return c10::static_intrusive_pointer_cast<OwnerRRef>(rrefInterface);
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
// Helper function that casts from OwnerRRef to c10::RRefInterface
|
| 411 |
+
inline TORCH_API c10::intrusive_ptr<c10::RRefInterface> fromOwnerRRef(
|
| 412 |
+
const c10::intrusive_ptr<RRef>& ownerRRef) {
|
| 413 |
+
return c10::static_intrusive_pointer_cast<c10::RRefInterface>(ownerRRef);
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <optional>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace distributed {
|
| 12 |
+
namespace rpc {
|
| 13 |
+
|
| 14 |
+
using torch::jit::Operator;
|
| 15 |
+
|
| 16 |
+
// A ScriptCall instance represents an invocation of a builtin operator for a
// TorchScript function. If it is a builtin operator, it
// contains a shared ptr to the `Operator` and a list of arguments.
// If it is a TorchScript function, it contains a non empty qualifiedName string
// to the TorchScript function schema name and a list of arguments.
class TORCH_API ScriptCall : public RpcCommandBase {
 public:
  // Constructor for builtin operator call.
  ScriptCall(std::shared_ptr<Operator> op, std::vector<at::IValue>&& stack);
  // Constructor for TorchScript function call.
  ScriptCall(
      const c10::QualifiedName& qualifiedName,
      std::vector<at::IValue>&& stack,
      const bool isAsyncExecution = false);

  // True when this call wraps a builtin operator (op_ has a value).
  bool hasOp() const;
  std::shared_ptr<Operator> op() const;
  // True when this call wraps a named TorchScript function.
  bool hasQualifiedName() const;
  const c10::QualifiedName& qualifiedName() const;
  // return the argument stack of this builtin operator
  const std::vector<at::IValue>& stack() const;
  // Mutable access to the argument stack.
  std::vector<at::IValue>& stackRef();
  inline bool isAsyncExecution() const {
    return isAsyncExecution_;
  }

  // Serialization to/from the wire Message format.
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<ScriptCall> fromMessage(const Message& message);

  ~ScriptCall() override = default;

 protected:
  virtual void toIValues(std::vector<at::IValue>& ivalues) const;
  static std::unique_ptr<ScriptCall> fromIValues(
      std::vector<at::IValue>& ivalues);

 private:
  // Given an operator symbol and a string schema, return the matched operator.
  static std::shared_ptr<Operator> matchOperator(const std::string& str_schema);

  static const std::string BUILTIN_OP_NAMESPACE_;
  static const std::string ATEN_PREFIX_;

  // This field has value if this ScriptCall represents invocation of a builtin
  // operator.
  std::optional<std::shared_ptr<Operator>> op_;
  // This field has non empty string if this ScriptCall represents invocation of
  // an annotated torchscript function defined by users.
  std::optional<const c10::QualifiedName> qualifiedName_;
  std::vector<at::IValue> stack_;
  const bool isAsyncExecution_;
};
|
| 68 |
+
|
| 69 |
+
} // namespace rpc
|
| 70 |
+
} // namespace distributed
|
| 71 |
+
} // namespace torch
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace rpc {
|
| 10 |
+
|
| 11 |
+
// Return value of a builtin operator or a TorchScript function.
class TORCH_API ScriptResp final : public RpcCommandBase {
 public:
  explicit ScriptResp(at::IValue&& values);

  // The returned value.
  const at::IValue& value();
  // Serialization to/from the wire Message format.
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<ScriptResp> fromMessage(const Message& message);

 private:
  const at::IValue value_;
};
|
| 23 |
+
|
| 24 |
+
} // namespace rpc
|
| 25 |
+
} // namespace distributed
|
| 26 |
+
} // namespace torch
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 5 |
+
#include <torch/csrc/distributed/autograd/utils.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/rref_context.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/script_remote_call.h>
|
| 8 |
+
|
| 9 |
+
namespace torch::distributed::rpc {
|
| 10 |
+
|
| 11 |
+
// This function sends an rpc call to run a torchscript function. Currently the
// torchscript function could only be a user defined python function with the
// "@torch.jit.script" annotation. The torchscript function could not be
// a class constructor, class method, instance method or a script module.
//   dstWorkerName: destination worker name
//   qualifiedName: torchscript function qualified name string like
//                  "moduleName::torchscriptFunctionName", e.g.,
//                  "dist_autograd_test::my_py_add"
//   stack: a bag of IValue args passed to torchscriptFunctionName
// It returns c10::intrusive_ptr<ivalue::Future>
c10::intrusive_ptr<c10::ivalue::Future> TORCH_API rpcTorchscript(
    const std::string& dstWorkerName,
    const c10::QualifiedName& qualifiedName,
    const c10::FunctionSchema& functionSchema,
    std::vector<c10::IValue>& stack,
    const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
    const bool isAsyncExecution = false);

// Same parameters as rpcTorchscript above, but returns an RRef to the remote
// result rather than a Future — presumably the dist.remote counterpart of the
// call above (definition not visible here; confirm in torchscript_functions.cpp).
c10::intrusive_ptr<RRef> TORCH_API remoteTorchscript(
    const std::string& dstWorkerName,
    const c10::QualifiedName& qualifiedName,
    const c10::FunctionSchema& functionSchema,
    std::vector<c10::IValue>& stack,
    const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
    const bool isAsyncExecution = false);
|
| 36 |
+
|
| 37 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
|
| 6 |
+
namespace torch::distributed::rpc {
|
| 7 |
+
|
| 8 |
+
using worker_id_t = int16_t;
|
| 9 |
+
using local_id_t = int64_t;
|
| 10 |
+
|
| 11 |
+
// Query and toggle whether JIT pickling of RRefs is allowed. The flag's
// storage is defined elsewhere; see JitRRefPickleGuard below for scoped use.
bool getAllowJitRRefPickle();
TORCH_API void enableJitRRefPickle();
TORCH_API void disableJitRRefPickle();
|
| 14 |
+
|
| 15 |
+
// RAII-style guard around the JitRRefPickle flag. NOTE(review): presumably
// enables JIT RRef pickling for its lifetime and restores/disables it on
// destruction — the constructor/destructor bodies are not visible here;
// confirm in types.cpp.
struct TORCH_API JitRRefPickleGuard {
  JitRRefPickleGuard();
  ~JitRRefPickleGuard();
};
|
| 19 |
+
|
| 20 |
+
// A (createdOn, localId) pair, globally unique across workers. Aliased below
// as RRefId, ForkId, and ProfilingId.
struct TORCH_API GloballyUniqueId final {
  GloballyUniqueId(worker_id_t createdOn, local_id_t localId);
  GloballyUniqueId(const GloballyUniqueId& other) = default;
  // Assignment is deleted; both members are const.
  GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete;

  bool operator==(const GloballyUniqueId& other) const;
  bool operator!=(const GloballyUniqueId& other) const;

  // (De)serialization to/from an IValue for sending over RPC.
  at::IValue toIValue() const;
  static GloballyUniqueId fromIValue(const at::IValue&);

  struct Hash {
    size_t operator()(const GloballyUniqueId& key) const {
      // Packs createdOn_ into the high bits and localId_ into the low
      // kLocalIdBits bits. NOTE(review): assumes localId_ < 2^kLocalIdBits;
      // larger ids would bleed into the createdOn_ bits.
      return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_;
    }
  };

  // Number of low bits reserved for localId_ in the packed hash.
  static constexpr int kLocalIdBits = 48;

  const worker_id_t createdOn_; // worker that created this id
  const local_id_t localId_; // id local to the creating worker
};
|
| 42 |
+
|
| 43 |
+
TORCH_API std::ostream& operator<<(
|
| 44 |
+
std::ostream& os,
|
| 45 |
+
const GloballyUniqueId& globalId);
|
| 46 |
+
|
| 47 |
+
using RRefId = GloballyUniqueId;
|
| 48 |
+
using ForkId = GloballyUniqueId;
|
| 49 |
+
using ProfilingId = GloballyUniqueId;
|
| 50 |
+
|
| 51 |
+
// A serialized Python object: a payload string (presumably the pickle byte
// stream — confirm against the serializer) plus the tensors referenced by it.
struct TORCH_API SerializedPyObj final {
  SerializedPyObj(std::string&& payload, std::vector<at::Tensor>&& tensors)
      : payload_(std::move(payload)), tensors_(std::move(tensors)) {}

  // Consumes this object, converting it into a vector of IValues.
  std::vector<at::IValue> toIValues() &&;
  // Inverse of toIValues().
  static SerializedPyObj fromIValues(std::vector<at::IValue> value);

  std::string payload_;
  std::vector<at::Tensor> tensors_;
};
|
| 61 |
+
|
| 62 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace torch::distributed::rpc {
|
| 8 |
+
|
| 9 |
+
// This class converts the content in a PythonCall into py::object. This is a
// helper class to make sure that all arguments deserialization is done before
// entering RequestCallbackImpl::processRpc(...), so that the deserialization
// related logic can be carried out in one spot instead of scattered in multiple
// places for different message types.
// NB: The reason for not consolidating this class into PythonCall is because
// PythonCall is a libtorch type which should not depend on Python types.
class TORCH_API UnpickledPythonCall : public RpcCommandBase {
 public:
  UnpickledPythonCall(
      const SerializedPyObj& serializedPyObj,
      bool isAsyncExecution);
  ~UnpickledPythonCall() override;

  // toMessage() method is not implemented, as objects of this class should
  // never be directly converted into a Message object.
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  // The deserialized Python user-defined function.
  const py::object& pythonUdf() const;

  // Whether the wrapped UDF was marked for async execution.
  inline bool isAsyncExecution() const {
    return isAsyncExecution_;
  }

 private:
  py::object pythonUdf_;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const bool isAsyncExecution_;
};
|
| 37 |
+
|
| 38 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/unpickled_python_call.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::distributed::rpc {
|
| 9 |
+
|
| 10 |
+
// This class converts the content in a PythonRemoteCall into py::object. This
// is a helper class to make sure that all arguments deserialization is done
// before entering RequestCallbackImpl::processRpc(...), so that the
// deserialization related logic can be carried out in one spot instead of
// scattered in multiple places for different message types.
// NB: The reason for not consolidating this class into PythonRemoteCall is
// because PythonRemoteCall is a libtorch type which should not depend on
// Python types.
class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall {
 public:
  explicit UnpickledPythonRemoteCall(
      const SerializedPyObj& serializedPyObj,
      const at::IValue& retRRefId,
      const at::IValue& retForkId,
      const bool isAsyncExecution);

  // Id of the RRef that will hold the return value.
  const RRefId& rrefId() const;
  // ForkId identifying the caller's UserRRef for the return value.
  const ForkId& forkId() const;

 private:
  RRefId rrefId_;
  ForkId forkId_;
};
|
| 32 |
+
|
| 33 |
+
} // namespace torch::distributed::rpc
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <c10/core/Device.h>
#include <c10/core/Event.h>
#include <c10/core/Stream.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/serialization/pickle.h>
#include <torch/csrc/utils/byte_order.h>

// Serialization / deserialization helpers shared by the torch.distributed.rpc
// wire protocol. Declarations only; implementations live in the matching .cpp.
namespace torch {
namespace distributed {
namespace rpc {

// Parse error message and return RPCErrorType based on the message.
TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture);
// Create an error string given the error description and error type
TORCH_API std::string makeRPCError(
    const std::string& rpcErrorStr,
    RPCErrorType errorType);

// Given an RPC message received as a request over the wire, deserialize it into
// the appropriate 'RpcCommandBase' type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeRequest(
    const Message& request);

// Given an RPC message received as a response over the wire, deserialize it
// into the appropriate 'RpcCommandBase' type, if the response is
// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions
// to received tensors and set the wrappedMsgType to its wrapped message type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeResponse(
    const Message& response,
    MessageType& wrappedMsgType);

// Given an RPC message received as a response over the wire, deserialize it
// into the valid IValue if the message is for a script rpc result,
// otherwise deserialize it into dummy none ivalue that will never be used.
// In this deserialization, we also attach recv rpc backward functions if
// needed.
// NOTE(review): unlike its neighbors this declaration carries no TORCH_API —
// presumably intentional (internal helper); confirm before exporting.
IValue deserializeResptoIValueInternal(
    RpcCommandBase& rpc,
    MessageType messageType);
TORCH_API IValue deserializeRespToIValue(const Message& message);

// Note: format is subject to change and intended for RPCs.
// For saving persistently to disk, use torch::save().
TORCH_API std::string wireSerialize(
    const std::vector<char>& payload,
    const std::vector<at::Tensor>& tensors);

// Counterpart to wireSerialize(): reconstructs the (payload, tensors) pair
// from `data_size` bytes starting at `data`.
TORCH_API std::pair<std::vector<char>, std::vector<at::Tensor>> wireDeserialize(
    const void* data,
    size_t data_size);

// We use vector<char> as the type of blobs because it's what rpc::Message uses
// for its payload, even though it has the disadvantage that it cannot be
// allocated with uninitialized memory: it is always zeroed out.

// Some Tensors are effectively views of larger Tensors, where only a small
// subset of the Storage data is referenced. This normally is good and avoids
// copies when kept locally, but if we naively push the whole Storage over the
// wire, we'll end up with excess network traffic. This change clones tensors if
// we'd save at least half the data, and over a minimum hurdle.
TORCH_API c10::List<at::Tensor> cloneSparseTensors(
    const std::vector<at::Tensor>& tensors);

// Combines an original payload and wrapped payload into the original payload.
// Used to generate the overall payload for the wrapped RPC.
TORCH_API void writeWrappedPayload(
    std::vector<char>& originalPayload,
    std::vector<char>& additionalPayload);

// Reads the additional, wrapped payload from a wrapped RPC off of the input
// payload. After this, payload will contain the payload of the original,
// un-wrapped RPC.
TORCH_API std::vector<at::IValue> readWrappedPayload(
    std::vector<char>& payload,
    const rpc::Message& message);

// Takes a list of events from autograd profiler and populates them into
// profiledEvents to be carried over RPC.
TORCH_API void populateRemoteProfiledEvents(
    std::vector<torch::autograd::profiler::LegacyEvent>& profiledEvents,
    const torch::autograd::profiler::ProfilerConfig& profilerConfig,
    const std::vector<std::vector<torch::autograd::profiler::LegacyEvent>>&
        eventLists);

} // namespace rpc
} // namespace distributed
} // namespace torch
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/profiler/unwind/unwind.h>

namespace torch {
// Whether C++ stack-trace capture is enabled for this process —
// presumably toggled by an env var/config; see the .cpp for the source of
// truth (TODO confirm).
TORCH_API bool get_cpp_stacktraces_enabled();
// Which torch::unwind::Mode to use when symbolizing captured frames.
TORCH_API torch::unwind::Mode get_symbolize_mode();
} // namespace torch
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

namespace torch::utils {

// Compile-time answer to "was this binary built with CUDA support?",
// i.e. whether USE_CUDA was defined when this translation unit compiled.
inline constexpr bool cuda_enabled() {
#ifdef USE_CUDA
  constexpr bool built_with_cuda = true;
#else
  constexpr bool built_with_cuda = false;
#endif
  return built_with_cuda;
}

} // namespace torch::utils
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <utility>

// Owning smart-pointer wrapper around a raw T* (typically a CPython object).
// The held object is released via the type-specific free(), which is only
// declared here; each instantiation defines it elsewhere (for PyObject it
// presumably decrefs — confirm in the .cpp). Copying is implicitly disabled
// because user-declared move members suppress the copy members.
template <class T>
class TORCH_PYTHON_API THPPointer {
 public:
  THPPointer() : ptr(nullptr){};
  explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
  // Move construction transfers ownership and leaves the source empty.
  THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}

  ~THPPointer() {
    free();
  };
  T* get() {
    return ptr;
  }
  const T* get() const {
    return ptr;
  }
  // Relinquish ownership without freeing; the caller becomes responsible.
  T* release() {
    T* tmp = ptr;
    ptr = nullptr;
    return tmp;
  }
  // Implicit conversion to the raw pointer for passing into C APIs.
  operator T*() {
    return ptr;
  }
  // Frees the currently held object, then takes ownership of new_ptr.
  THPPointer& operator=(T* new_ptr) noexcept {
    free();
    ptr = new_ptr;
    return *this;
  }
  // NOTE(review): no self-move guard — self-move-assignment would free the
  // pointer and then re-adopt it; callers must not self-assign.
  THPPointer& operator=(THPPointer&& p) noexcept {
    free();
    ptr = p.ptr;
    p.ptr = nullptr;
    return *this;
  }
  T* operator->() {
    return ptr;
  }
  explicit operator bool() const {
    return ptr != nullptr;
  }

 private:
  // Type-specific release hook; defined per-instantiation in a .cpp.
  void free();
  T* ptr = nullptr;
};

/**
 * An RAII-style, owning pointer to a PyObject. You must protect
 * destruction of this object with the GIL.
 *
 * WARNING: Think twice before putting this as a field in a C++
 * struct. This class does NOT take out the GIL on destruction,
 * so if you will need to ensure that the destructor of your struct
 * is either (a) always invoked when the GIL is taken or (b) takes
 * out the GIL itself. Easiest way to avoid this problem is to
 * not use THPPointer in this situation.
 */
using THPObjectPtr = THPPointer<PyObject>;
using THPCodeObjectPtr = THPPointer<PyCodeObject>;
using THPFrameObjectPtr = THPPointer<PyFrameObject>;
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/python_headers.h>

// This file contains utilities used for handling PyObject preservation

// Clears the slot storage that `type` declares on the instance `self`.
// Exact traversal (e.g. whether base classes are walked) lives in the .cpp —
// TODO confirm before relying on it.
void clear_slots(PyTypeObject* type, PyObject* self);
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <ATen/ATen.h>
#include <c10/util/TypeCast.h>
#include <torch/csrc/python_headers.h>

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/python_numbers.h>

// Conversions between Python number objects and single elements of
// untyped storage interpreted through an at::ScalarType.
namespace torch::utils {

// Converts a Python number `obj` to integral type T. On Python >= 3.10 a
// float argument is range-checked via c10::checked_convert (using `type` in
// the error text); earlier Pythons fall back to a plain truncating cast.
template <typename T>
inline T unpackIntegral(PyObject* obj, const char* type) {
#if PY_VERSION_HEX >= 0x030a00f0
  // In Python-3.10 floats can no longer be silently converted to integers
  // Keep backward compatible behavior for now
  if (PyFloat_Check(obj)) {
    return c10::checked_convert<T>(THPUtils_unpackDouble(obj), type);
  }
  return c10::checked_convert<T>(THPUtils_unpackLong(obj), type);
#else
  return static_cast<T>(THPUtils_unpackLong(obj));
#endif
}

// Writes the Python number `obj` into `data`, which must point to storage of
// the C++ type corresponding to `scalarType` (the casts below assume correct
// alignment/size). Throws std::runtime_error for unsupported scalar types.
inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) {
  switch (scalarType) {
    case at::kByte:
      *(uint8_t*)data = unpackIntegral<uint8_t>(obj, "uint8");
      break;
    case at::kUInt16:
      *(uint16_t*)data = unpackIntegral<uint16_t>(obj, "uint16");
      break;
    case at::kUInt32:
      *(uint32_t*)data = unpackIntegral<uint32_t>(obj, "uint32");
      break;
    case at::kUInt64:
      // NB: This doesn't allow implicit conversion of float to int
      *(uint64_t*)data = THPUtils_unpackUInt64(obj);
      break;
    case at::kChar:
      *(int8_t*)data = unpackIntegral<int8_t>(obj, "int8");
      break;
    case at::kShort:
      *(int16_t*)data = unpackIntegral<int16_t>(obj, "int16");
      break;
    case at::kInt:
      *(int32_t*)data = unpackIntegral<int32_t>(obj, "int32");
      break;
    case at::kLong:
      *(int64_t*)data = unpackIntegral<int64_t>(obj, "int64");
      break;
    case at::kHalf:
      *(at::Half*)data =
          at::convert<at::Half, double>(THPUtils_unpackDouble(obj));
      break;
    case at::kFloat:
      *(float*)data = (float)THPUtils_unpackDouble(obj);
      break;
    case at::kDouble:
      *(double*)data = THPUtils_unpackDouble(obj);
      break;
    case at::kComplexHalf:
      // Narrow via complex<float>; there is no direct double -> Half path.
      *(c10::complex<at::Half>*)data =
          (c10::complex<at::Half>)static_cast<c10::complex<float>>(
              THPUtils_unpackComplexDouble(obj));
      break;
    case at::kComplexFloat:
      *(c10::complex<float>*)data =
          (c10::complex<float>)THPUtils_unpackComplexDouble(obj);
      break;
    case at::kComplexDouble:
      *(c10::complex<double>*)data = THPUtils_unpackComplexDouble(obj);
      break;
    case at::kBool:
      *(bool*)data = THPUtils_unpackNumberAsBool(obj);
      break;
    case at::kBFloat16:
      *(at::BFloat16*)data =
          at::convert<at::BFloat16, double>(THPUtils_unpackDouble(obj));
      break;
    case at::kFloat8_e5m2:
      *(at::Float8_e5m2*)data =
          at::convert<at::Float8_e5m2, double>(THPUtils_unpackDouble(obj));
      break;
    case at::kFloat8_e5m2fnuz:
      *(at::Float8_e5m2fnuz*)data =
          at::convert<at::Float8_e5m2fnuz, double>(THPUtils_unpackDouble(obj));
      break;
    case at::kFloat8_e4m3fn:
      *(at::Float8_e4m3fn*)data =
          at::convert<at::Float8_e4m3fn, double>(THPUtils_unpackDouble(obj));
      break;
    case at::kFloat8_e4m3fnuz:
      *(at::Float8_e4m3fnuz*)data =
          at::convert<at::Float8_e4m3fnuz, double>(THPUtils_unpackDouble(obj));
      break;
    default:
      throw std::runtime_error("invalid type");
  }
}

// Reads one element of `scalarType` from `data` and boxes it as a new Python
// object (int / float / complex / bool as appropriate). Throws
// std::runtime_error for unsupported scalar types.
inline PyObject* load_scalar(const void* data, at::ScalarType scalarType) {
  switch (scalarType) {
    case at::kByte:
      return THPUtils_packInt64(*(uint8_t*)data);
    case at::kUInt16:
      return THPUtils_packInt64(*(uint16_t*)data);
    case at::kUInt32:
      return THPUtils_packUInt32(*(uint32_t*)data);
    case at::kUInt64:
      return THPUtils_packUInt64(*(uint64_t*)data);
    case at::kChar:
      return THPUtils_packInt64(*(int8_t*)data);
    case at::kShort:
      return THPUtils_packInt64(*(int16_t*)data);
    case at::kInt:
      return THPUtils_packInt64(*(int32_t*)data);
    case at::kLong:
      return THPUtils_packInt64(*(int64_t*)data);
    case at::kHalf:
      return PyFloat_FromDouble(
          at::convert<double, at::Half>(*(at::Half*)data));
    case at::kFloat:
      return PyFloat_FromDouble(*(float*)data);
    case at::kDouble:
      return PyFloat_FromDouble(*(double*)data);
    case at::kComplexHalf: {
      auto data_ = reinterpret_cast<const c10::complex<at::Half>*>(data);
      return PyComplex_FromDoubles(data_->real(), data_->imag());
    }
    case at::kComplexFloat: {
      auto data_ = reinterpret_cast<const c10::complex<float>*>(data);
      return PyComplex_FromDoubles(data_->real(), data_->imag());
    }
    case at::kComplexDouble:
      // Relies on c10::complex<double> being layout-compatible with
      // Py_complex (two doubles).
      return PyComplex_FromCComplex(
          *reinterpret_cast<Py_complex*>((c10::complex<double>*)data));
    case at::kBool:
      return PyBool_FromLong(*(bool*)data);
    case at::kBFloat16:
      return PyFloat_FromDouble(
          at::convert<double, at::BFloat16>(*(at::BFloat16*)data));
    case at::kFloat8_e5m2:
      return PyFloat_FromDouble(
          at::convert<double, at::Float8_e5m2>(*(at::Float8_e5m2*)data));
    case at::kFloat8_e4m3fn:
      return PyFloat_FromDouble(
          at::convert<double, at::Float8_e4m3fn>(*(at::Float8_e4m3fn*)data));
    case at::kFloat8_e5m2fnuz:
      return PyFloat_FromDouble(at::convert<double, at::Float8_e5m2fnuz>(
          *(at::Float8_e5m2fnuz*)data));
    case at::kFloat8_e4m3fnuz:
      return PyFloat_FromDouble(at::convert<double, at::Float8_e4m3fnuz>(
          *(at::Float8_e4m3fnuz*)data));
    default:
      throw std::runtime_error("invalid type");
  }
}

} // namespace torch::utils
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/python_arg_parser.h>

#include <ATen/core/Tensor.h>

// Declarations for the C++ entry points backing Python tensor construction
// (torch.tensor, torch.as_tensor, Tensor.new_*, sparse factories, asarray).
namespace torch::utils {

// NOTE: [torch.tensor, lift_fresh, and device movement]
//
// The `only_lift_cpu_tensors` flag controls what happens on torch.tensor([1, 2,
// 3], device="cuda") (or any non-CPU devices).
//
// If false (default):
// - the data gets moved into a CPU Tensor
// - then, it gets moved to cuda (via .to)
// - finally, we call lift_fresh() on it.
// Steps 1 and 2 happen with all modes disabled.
//
// If true:
// - the data gets moved into a CPU Tensor (with correct dtype)
// - we call lift_fresh() on it
// - finally, we move it to cuda (via .to)
// Step 1 happens with all modes disabled.
//
// `only_lift_cpu_tensors=true` is useful to prevent CUDA initialization under
// FakeTensorMode because it avoids moving concrete data to CUDA.
TORCH_API bool only_lift_cpu_tensors();
TORCH_API void set_only_lift_cpu_tensors(bool value);

// Constructors backing torch.Tensor(...) and the legacy Type.new(...) paths.
at::Tensor base_tensor_ctor(PyObject* args, PyObject* kwargs);
at::Tensor legacy_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
at::Tensor legacy_tensor_new(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
// Builds a tensor from Python `data` for use as an advanced-indexing index.
at::Tensor indexing_tensor_from_data(
    c10::TensorOptions options,
    at::ScalarType scalar_type,
    std::optional<at::Device> device,
    PyObject* data);

// Sparse COO factory and its argument validator.
at::Tensor sparse_coo_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
void _validate_sparse_coo_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);

// Sparse compressed (CSR/CSC/BSR/BSC) factories.
at::Tensor sparse_compressed_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor sparse_csr_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor sparse_csc_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor sparse_bsr_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor sparse_bsc_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);

// Argument validators matching the compressed factories above.
void _validate_sparse_compressed_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
void _validate_sparse_csr_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
void _validate_sparse_csc_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
void _validate_sparse_bsr_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
void _validate_sparse_bsc_tensor_args(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);

// Dense factories: torch.tensor / torch.as_tensor / Tensor.new_tensor /
// Tensor.new_ones.
at::Tensor tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor as_tensor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
at::Tensor new_tensor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
at::Tensor new_ones(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PyObject* args,
    PyObject* kwargs);
// Zero-copy-style constructors from external data sources.
at::Tensor tensor_frombuffer(
    PyObject* buffer,
    at::ScalarType dtype,
    int64_t count,
    int64_t offset,
    bool requires_grad);
at::Tensor tensor_fromDLPack(PyObject* data);
at::Tensor asarray(
    PyObject* obj,
    std::optional<c10::ScalarType> dtype,
    std::optional<c10::Device> device,
    std::optional<bool> copy,
    bool requires_grad);
} // namespace torch::utils
|
mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/core/Variadic.h>
#include <torch/csrc/autograd/variable.h>

#include <type_traits>
#include <utility>

namespace torch {

using at::IterArgs;

// IterArgs visitor that counts Tensor arguments in a pack: each Tensor adds
// one, an engaged optional<Tensor> adds one, and an ArrayRef adds its size.
struct CountTensors : IterArgs<CountTensors> {
  size_t out = 0;
  void operator()(const at::Tensor& x) {
    out += 1;
  }
  void operator()(const std::optional<at::Tensor>& x) {
    out += x.has_value();
  }
  void operator()(at::ArrayRef<at::Tensor> xs) {
    out += xs.size();
  }
};

// Counts how many Tensors appear across `args` (see CountTensors).
template <typename... Args>
size_t count_tensors(Args&&... args) {
  return CountTensors().apply(std::forward<Args>(args)...).out;
}

// Same as CountTensors but for autograd::Variable arguments.
struct CountVariables : IterArgs<CountVariables> {
  size_t out = 0;
  void operator()(const autograd::Variable& x) {
    out += 1;
  }
  void operator()(at::ArrayRef<autograd::Variable> xs) {
    out += xs.size();
  }
};

// Counts how many Variables appear across `args` (see CountVariables).
template <typename... Args>
inline size_t count_variables(Args&&... args) {
  return CountVariables().apply(std::forward<Args>(args)...).out;
}

//===----------------------------------------------------------------------===//
// std::index_sequence shim for C++11
//===----------------------------------------------------------------------===//

// A container of type-template parameter indices.
template <size_t... Is>
struct Indices {};

// Decrements the index N, adds N-1 to the list of indices and forwards
// whatever we already have.
template <size_t N, size_t... Is>
struct MakeIndices : MakeIndices<N - 1, N - 1, Is...> {};

// Partial specialization that forms our base case. When N is zero, we stop
// and define a typedef that will be visible to earlier classes due to
// inheritance. The typedef we define is an index list containing the numbers
// 0 through N-1.
template <size_t... Is>
struct MakeIndices<0, Is...> {
  using indices = Indices<Is...>;
};

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

// Invokes `function` once per argument, in order, discarding results.
template <typename Function, typename... Ts>
void apply(Function function, Ts&&... ts) {
  // https://stackoverflow.com/questions/13978916/inserting-a-variadic-argument-list-into-a-vector
  // Creates a dummy array, so that each function call is evaluated in order.
  // `(function(), 0)` is because `function` should (!) return `void`, so
  // according to the comma operator, it is evaluated and its result (`void`)
  // is discarded. Then the zero is evaluated and used as an element in the
  // array. The first zero ensures the array is not empty.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
  int _[]{0, (function(std::forward<Ts>(ts)), 0)...};
  (void)_;
}

// Entry point: generates the index pack 0..sizeof...(Ts)-1 and delegates to
// the indexed overload below.
template <
    typename ReturnType,
    typename... Ts,
    typename Function,
    typename Accessor>
ReturnType unpack(Function function, Accessor accessor) {
  return ReturnType(unpack<ReturnType, Ts...>(
      std::move(function),
      std::move(accessor),
      typename MakeIndices<sizeof...(Ts)>::indices()));
}

// Indexed overload: calls `function` with accessor.operator()<Ts>(Is) for
// each (type, index) pair and wraps the result in ReturnType.
template <
    typename ReturnType,
    typename... Ts,
    typename Function,
    typename Accessor,
    size_t... Is>
ReturnType unpack(Function function, Accessor accessor, Indices<Is...>) {
  return ReturnType(function(accessor.template operator()<Ts>(Is)...));
}

} // namespace torch
|
openflamingo/lib/python3.10/site-packages/nltk/test/all.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test suite that runs all NLTK tests.
|
| 2 |
+
|
| 3 |
+
This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
|
| 4 |
+
project's ``setup-eggs.py`` file. Here, we create a test suite that
|
| 5 |
+
runs all of our doctests, and return it for processing by the setuptools
|
| 6 |
+
test harness.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import doctest
|
| 11 |
+
import os.path
|
| 12 |
+
import unittest
|
| 13 |
+
from glob import glob
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def additional_tests():
    """Build a unittest suite from every ``*.doctest`` file beside this module.

    Returns a :class:`unittest.TestSuite` containing one
    :func:`doctest.DocFileSuite` per doctest file found in this package's
    directory.
    """
    test_dir = os.path.dirname(__file__)
    doctest_names = [
        os.path.basename(p) for p in glob(os.path.join(test_dir, "*.doctest"))
    ]
    return unittest.TestSuite(doctest.DocFileSuite(name) for name in doctest_names)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# if os.path.split(path)[-1] != 'index.rst'
|
| 26 |
+
# skips time-dependent doctest in index.rst
|
openflamingo/lib/python3.10/site-packages/nltk/test/bnc.doctest
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
>>> import os.path
|
| 5 |
+
|
| 6 |
+
>>> from nltk.corpus.reader import BNCCorpusReader
|
| 7 |
+
>>> import nltk.test
|
| 8 |
+
|
| 9 |
+
>>> root = os.path.dirname(nltk.test.__file__)
|
| 10 |
+
>>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml')
|
| 11 |
+
|
| 12 |
+
Checking the word access.
|
| 13 |
+
-------------------------
|
| 14 |
+
|
| 15 |
+
>>> len(bnc.words())
|
| 16 |
+
151
|
| 17 |
+
|
| 18 |
+
>>> bnc.words()[:6]
|
| 19 |
+
['Ah', 'there', 'we', 'are', ',', '.']
|
| 20 |
+
>>> bnc.words(stem=True)[:6]
|
| 21 |
+
['ah', 'there', 'we', 'be', ',', '.']
|
| 22 |
+
|
| 23 |
+
>>> bnc.tagged_words()[:6]
|
| 24 |
+
[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
|
| 25 |
+
|
| 26 |
+
>>> bnc.tagged_words(c5=True)[:6]
|
| 27 |
+
[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
|
| 28 |
+
|
| 29 |
+
Testing access to the sentences.
|
| 30 |
+
--------------------------------
|
| 31 |
+
|
| 32 |
+
>>> len(bnc.sents())
|
| 33 |
+
15
|
| 34 |
+
|
| 35 |
+
>>> bnc.sents()[0]
|
| 36 |
+
['Ah', 'there', 'we', 'are', ',', '.']
|
| 37 |
+
>>> bnc.sents(stem=True)[0]
|
| 38 |
+
['ah', 'there', 'we', 'be', ',', '.']
|
| 39 |
+
|
| 40 |
+
>>> bnc.tagged_sents()[0]
|
| 41 |
+
[('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')]
|
| 42 |
+
>>> bnc.tagged_sents(c5=True)[0]
|
| 43 |
+
[('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')]
|
| 44 |
+
|
| 45 |
+
A non-lazy loader.
|
| 46 |
+
------------------
|
| 47 |
+
|
| 48 |
+
>>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False)
|
| 49 |
+
|
| 50 |
+
>>> len(eager.words())
|
| 51 |
+
151
|
| 52 |
+
>>> eager.words(stem=True)[6:17]
|
| 53 |
+
['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.']
|
| 54 |
+
|
| 55 |
+
>>> eager.tagged_words()[6:11]
|
| 56 |
+
[('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')]
|
| 57 |
+
>>> eager.tagged_words(c5=True)[6:17]
|
| 58 |
+
[('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')]
|
| 59 |
+
>>> len(eager.sents())
|
| 60 |
+
15
|
openflamingo/lib/python3.10/site-packages/nltk/test/childes_fixt.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def setup_module():
    """Skip this test module when the CHILDES corpus is not installed.

    The corpus must be fetched manually (it is not part of the standard NLTK
    data download), so a LookupError from ``nltk.data.find`` is expected on
    most machines and turns into a pytest skip rather than an error.
    """
    import pytest

    import nltk.data

    try:
        nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/")
    except LookupError:  # was `as e` — the binding was never used
        pytest.skip(
            "The CHILDES corpus is not found. "
            "It should be manually downloaded and saved/unpacked "
            "to [NLTK_Data_Dir]/corpora/childes/"
        )
|
openflamingo/lib/python3.10/site-packages/nltk/test/chunk.doctest
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
==========
|
| 5 |
+
Chunking
|
| 6 |
+
==========
|
| 7 |
+
|
| 8 |
+
>>> from nltk.chunk import *
|
| 9 |
+
>>> from nltk.chunk.util import *
|
| 10 |
+
>>> from nltk.chunk.regexp import *
|
| 11 |
+
>>> from nltk import Tree
|
| 12 |
+
|
| 13 |
+
>>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./."
|
| 14 |
+
>>> gold_chunked_text = tagstr2tree(tagged_text)
|
| 15 |
+
>>> unchunked_text = gold_chunked_text.flatten()
|
| 16 |
+
|
| 17 |
+
Chunking uses a special regexp syntax for rules that delimit the chunks. These
|
| 18 |
+
rules must be converted to 'regular' regular expressions before a sentence can
|
| 19 |
+
be chunked.
|
| 20 |
+
|
| 21 |
+
>>> tag_pattern = "<DT>?<JJ>*<NN.*>"
|
| 22 |
+
>>> regexp_pattern = tag_pattern2re_pattern(tag_pattern)
|
| 23 |
+
>>> regexp_pattern
|
| 24 |
+
'(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)'
|
| 25 |
+
|
| 26 |
+
Construct some new chunking rules.
|
| 27 |
+
|
| 28 |
+
>>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything")
|
| 29 |
+
>>> strip_rule = StripRule(r"<VBD|IN|\.>", "Strip on verbs/prepositions")
|
| 30 |
+
>>> split_rule = SplitRule("<DT><NN>", "<DT><NN>",
|
| 31 |
+
... "Split successive determiner/noun pairs")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
Create and score a series of chunk parsers, successively more complex.
|
| 35 |
+
|
| 36 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP')
|
| 37 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
| 38 |
+
>>> print(chunked_text)
|
| 39 |
+
(S
|
| 40 |
+
(NP
|
| 41 |
+
The/DT
|
| 42 |
+
cat/NN
|
| 43 |
+
sat/VBD
|
| 44 |
+
on/IN
|
| 45 |
+
the/DT
|
| 46 |
+
mat/NN
|
| 47 |
+
the/DT
|
| 48 |
+
dog/NN
|
| 49 |
+
chewed/VBD
|
| 50 |
+
./.))
|
| 51 |
+
|
| 52 |
+
>>> chunkscore = ChunkScore()
|
| 53 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
| 54 |
+
>>> print(chunkscore.precision())
|
| 55 |
+
0.0
|
| 56 |
+
|
| 57 |
+
>>> print(chunkscore.recall())
|
| 58 |
+
0.0
|
| 59 |
+
|
| 60 |
+
>>> print(chunkscore.f_measure())
|
| 61 |
+
0
|
| 62 |
+
|
| 63 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
| 64 |
+
(NP The/DT cat/NN)
|
| 65 |
+
(NP the/DT dog/NN)
|
| 66 |
+
(NP the/DT mat/NN)
|
| 67 |
+
|
| 68 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
| 69 |
+
(NP
|
| 70 |
+
The/DT
|
| 71 |
+
cat/NN
|
| 72 |
+
sat/VBD
|
| 73 |
+
on/IN
|
| 74 |
+
the/DT
|
| 75 |
+
mat/NN
|
| 76 |
+
the/DT
|
| 77 |
+
dog/NN
|
| 78 |
+
chewed/VBD
|
| 79 |
+
./.)
|
| 80 |
+
|
| 81 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule],
|
| 82 |
+
... chunk_label='NP')
|
| 83 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text)
|
| 84 |
+
>>> print(chunked_text)
|
| 85 |
+
(S
|
| 86 |
+
(NP The/DT cat/NN)
|
| 87 |
+
sat/VBD
|
| 88 |
+
on/IN
|
| 89 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
| 90 |
+
chewed/VBD
|
| 91 |
+
./.)
|
| 92 |
+
>>> assert chunked_text == chunk_parser.parse(list(unchunked_text))
|
| 93 |
+
|
| 94 |
+
>>> chunkscore = ChunkScore()
|
| 95 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
| 96 |
+
>>> chunkscore.precision()
|
| 97 |
+
0.5
|
| 98 |
+
|
| 99 |
+
>>> print(chunkscore.recall())
|
| 100 |
+
0.33333333...
|
| 101 |
+
|
| 102 |
+
>>> print(chunkscore.f_measure())
|
| 103 |
+
0.4
|
| 104 |
+
|
| 105 |
+
>>> for chunk in sorted(chunkscore.missed()): print(chunk)
|
| 106 |
+
(NP the/DT dog/NN)
|
| 107 |
+
(NP the/DT mat/NN)
|
| 108 |
+
|
| 109 |
+
>>> for chunk in chunkscore.incorrect(): print(chunk)
|
| 110 |
+
(NP the/DT mat/NN the/DT dog/NN)
|
| 111 |
+
|
| 112 |
+
>>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule],
|
| 113 |
+
... chunk_label='NP')
|
| 114 |
+
>>> chunked_text = chunk_parser.parse(unchunked_text, trace=True)
|
| 115 |
+
# Input:
|
| 116 |
+
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
|
| 117 |
+
# Chunk everything:
|
| 118 |
+
{<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>}
|
| 119 |
+
# Strip on verbs/prepositions:
|
| 120 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN> <DT> <NN>} <VBD> <.>
|
| 121 |
+
# Split successive determiner/noun pairs:
|
| 122 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
|
| 123 |
+
>>> print(chunked_text)
|
| 124 |
+
(S
|
| 125 |
+
(NP The/DT cat/NN)
|
| 126 |
+
sat/VBD
|
| 127 |
+
on/IN
|
| 128 |
+
(NP the/DT mat/NN)
|
| 129 |
+
(NP the/DT dog/NN)
|
| 130 |
+
chewed/VBD
|
| 131 |
+
./.)
|
| 132 |
+
|
| 133 |
+
>>> chunkscore = ChunkScore()
|
| 134 |
+
>>> chunkscore.score(gold_chunked_text, chunked_text)
|
| 135 |
+
>>> chunkscore.precision()
|
| 136 |
+
1.0
|
| 137 |
+
|
| 138 |
+
>>> chunkscore.recall()
|
| 139 |
+
1.0
|
| 140 |
+
|
| 141 |
+
>>> chunkscore.f_measure()
|
| 142 |
+
1.0
|
| 143 |
+
|
| 144 |
+
>>> chunkscore.missed()
|
| 145 |
+
[]
|
| 146 |
+
|
| 147 |
+
>>> chunkscore.incorrect()
|
| 148 |
+
[]
|
| 149 |
+
|
| 150 |
+
>>> chunk_parser.rules()
|
| 151 |
+
[<ChunkRule: '<.*>+'>, <StripRule: '<VBD|IN|\\.>'>,
|
| 152 |
+
<SplitRule: '<DT><NN>', '<DT><NN>'>]
|
| 153 |
+
|
| 154 |
+
Printing parsers:
|
| 155 |
+
|
| 156 |
+
>>> print(repr(chunk_parser))
|
| 157 |
+
<RegexpChunkParser with 3 rules>
|
| 158 |
+
>>> print(chunk_parser)
|
| 159 |
+
RegexpChunkParser with 3 rules:
|
| 160 |
+
Chunk everything
|
| 161 |
+
<ChunkRule: '<.*>+'>
|
| 162 |
+
Strip on verbs/prepositions
|
| 163 |
+
<StripRule: '<VBD|IN|\\.>'>
|
| 164 |
+
Split successive determiner/noun pairs
|
| 165 |
+
<SplitRule: '<DT><NN>', '<DT><NN>'>
|
| 166 |
+
|
| 167 |
+
Regression Tests
|
| 168 |
+
~~~~~~~~~~~~~~~~
|
| 169 |
+
ChunkParserI
|
| 170 |
+
------------
|
| 171 |
+
`ChunkParserI` is an abstract interface -- it is not meant to be
|
| 172 |
+
instantiated directly.
|
| 173 |
+
|
| 174 |
+
>>> ChunkParserI().parse([])
|
| 175 |
+
Traceback (most recent call last):
|
| 176 |
+
. . .
|
| 177 |
+
NotImplementedError
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
ChunkString
|
| 181 |
+
-----------
|
| 182 |
+
ChunkString can be built from a tree of tagged tuples, a tree of
|
| 183 |
+
trees, or a mixed list of both:
|
| 184 |
+
|
| 185 |
+
>>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)])
|
| 186 |
+
>>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])])
|
| 187 |
+
>>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])])
|
| 188 |
+
>>> ChunkString(t1)
|
| 189 |
+
<ChunkString: '<t0><t1><t2><t3><t4><t5><t6><t7><t8><t9>'>
|
| 190 |
+
>>> ChunkString(t2)
|
| 191 |
+
<ChunkString: '<t0><t1>'>
|
| 192 |
+
>>> ChunkString(t3)
|
| 193 |
+
<ChunkString: '<t0><t1>'>
|
| 194 |
+
|
| 195 |
+
Other values generate an error:
|
| 196 |
+
|
| 197 |
+
>>> ChunkString(Tree('S', ['x']))
|
| 198 |
+
Traceback (most recent call last):
|
| 199 |
+
. . .
|
| 200 |
+
ValueError: chunk structures must contain tagged tokens or trees
|
| 201 |
+
|
| 202 |
+
The `str()` for a chunk string adds spaces to it, which makes it line
|
| 203 |
+
up with `str()` output for other chunk strings over the same
|
| 204 |
+
underlying input.
|
| 205 |
+
|
| 206 |
+
>>> cs = ChunkString(t1)
|
| 207 |
+
>>> print(cs)
|
| 208 |
+
<t0> <t1> <t2> <t3> <t4> <t5> <t6> <t7> <t8> <t9>
|
| 209 |
+
>>> cs.xform('<t3>', '{<t3>}')
|
| 210 |
+
>>> print(cs)
|
| 211 |
+
<t0> <t1> <t2> {<t3>} <t4> <t5> <t6> <t7> <t8> <t9>
|
| 212 |
+
|
| 213 |
+
The `_verify()` method makes sure that our transforms don't corrupt
|
| 214 |
+
the chunk string. By setting debug_level=2, `_verify()` will be
|
| 215 |
+
called at the end of every call to `xform`.
|
| 216 |
+
|
| 217 |
+
>>> cs = ChunkString(t1, debug_level=3)
|
| 218 |
+
|
| 219 |
+
>>> # tag not marked with <...>:
|
| 220 |
+
>>> cs.xform('<t3>', 't3')
|
| 221 |
+
Traceback (most recent call last):
|
| 222 |
+
. . .
|
| 223 |
+
ValueError: Transformation generated invalid chunkstring:
|
| 224 |
+
<t0><t1><t2>t3<t4><t5><t6><t7><t8><t9>
|
| 225 |
+
|
| 226 |
+
>>> # brackets not balanced:
|
| 227 |
+
>>> cs.xform('<t3>', '{<t3>')
|
| 228 |
+
Traceback (most recent call last):
|
| 229 |
+
. . .
|
| 230 |
+
ValueError: Transformation generated invalid chunkstring:
|
| 231 |
+
<t0><t1><t2>{<t3><t4><t5><t6><t7><t8><t9>
|
| 232 |
+
|
| 233 |
+
>>> # nested brackets:
|
| 234 |
+
>>> cs.xform('<t3><t4><t5>', '{<t3>{<t4>}<t5>}')
|
| 235 |
+
Traceback (most recent call last):
|
| 236 |
+
. . .
|
| 237 |
+
ValueError: Transformation generated invalid chunkstring:
|
| 238 |
+
<t0><t1><t2>{<t3>{<t4>}<t5>}<t6><t7><t8><t9>
|
| 239 |
+
|
| 240 |
+
>>> # modified tags:
|
| 241 |
+
>>> cs.xform('<t3>', '<t9>')
|
| 242 |
+
Traceback (most recent call last):
|
| 243 |
+
. . .
|
| 244 |
+
ValueError: Transformation generated invalid chunkstring: tag changed
|
| 245 |
+
|
| 246 |
+
>>> # added tags:
|
| 247 |
+
>>> cs.xform('<t9>', '<t9><t10>')
|
| 248 |
+
Traceback (most recent call last):
|
| 249 |
+
. . .
|
| 250 |
+
ValueError: Transformation generated invalid chunkstring: tag changed
|
| 251 |
+
|
| 252 |
+
Chunking Rules
|
| 253 |
+
--------------
|
| 254 |
+
|
| 255 |
+
Test the different rule constructors & __repr__ methods:
|
| 256 |
+
|
| 257 |
+
>>> r1 = RegexpChunkRule('<a|b>'+ChunkString.IN_STRIP_PATTERN,
|
| 258 |
+
... '{<a|b>}', 'chunk <a> and <b>')
|
| 259 |
+
>>> r2 = RegexpChunkRule(re.compile('<a|b>'+ChunkString.IN_STRIP_PATTERN),
|
| 260 |
+
... '{<a|b>}', 'chunk <a> and <b>')
|
| 261 |
+
>>> r3 = ChunkRule('<a|b>', 'chunk <a> and <b>')
|
| 262 |
+
>>> r4 = StripRule('<a|b>', 'strip <a> and <b>')
|
| 263 |
+
>>> r5 = UnChunkRule('<a|b>', 'unchunk <a> and <b>')
|
| 264 |
+
>>> r6 = MergeRule('<a>', '<b>', 'merge <a> w/ <b>')
|
| 265 |
+
>>> r7 = SplitRule('<a>', '<b>', 'split <a> from <b>')
|
| 266 |
+
>>> r8 = ExpandLeftRule('<a>', '<b>', 'expand left <a> <b>')
|
| 267 |
+
>>> r9 = ExpandRightRule('<a>', '<b>', 'expand right <a> <b>')
|
| 268 |
+
>>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9:
|
| 269 |
+
... print(rule)
|
| 270 |
+
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
|
| 271 |
+
<RegexpChunkRule: '<a|b>(?=[^\\}]*(\\{|$))'->'{<a|b>}'>
|
| 272 |
+
<ChunkRule: '<a|b>'>
|
| 273 |
+
<StripRule: '<a|b>'>
|
| 274 |
+
<UnChunkRule: '<a|b>'>
|
| 275 |
+
<MergeRule: '<a>', '<b>'>
|
| 276 |
+
<SplitRule: '<a>', '<b>'>
|
| 277 |
+
<ExpandLeftRule: '<a>', '<b>'>
|
| 278 |
+
<ExpandRightRule: '<a>', '<b>'>
|
| 279 |
+
|
| 280 |
+
`tag_pattern2re_pattern()` complains if the tag pattern looks problematic:
|
| 281 |
+
|
| 282 |
+
>>> tag_pattern2re_pattern('{}')
|
| 283 |
+
Traceback (most recent call last):
|
| 284 |
+
. . .
|
| 285 |
+
ValueError: Bad tag pattern: '{}'
|
| 286 |
+
|
| 287 |
+
RegexpChunkParser
|
| 288 |
+
-----------------
|
| 289 |
+
|
| 290 |
+
A warning is printed when parsing an empty sentence:
|
| 291 |
+
|
| 292 |
+
>>> parser = RegexpChunkParser([ChunkRule('<a>', '')])
|
| 293 |
+
>>> parser.parse(Tree('S', []))
|
| 294 |
+
Warning: parsing empty text
|
| 295 |
+
Tree('S', [])
|
| 296 |
+
|
| 297 |
+
RegexpParser
|
| 298 |
+
------------
|
| 299 |
+
|
| 300 |
+
>>> parser = RegexpParser('''
|
| 301 |
+
... NP: {<DT>? <JJ>* <NN>*} # NP
|
| 302 |
+
... P: {<IN>} # Preposition
|
| 303 |
+
... V: {<V.*>} # Verb
|
| 304 |
+
... PP: {<P> <NP>} # PP -> P NP
|
| 305 |
+
... VP: {<V> <NP|PP>*} # VP -> V (NP|PP)*
|
| 306 |
+
... ''')
|
| 307 |
+
>>> print(repr(parser))
|
| 308 |
+
<chunk.RegexpParser with 5 stages>
|
| 309 |
+
>>> print(parser)
|
| 310 |
+
chunk.RegexpParser with 5 stages:
|
| 311 |
+
RegexpChunkParser with 1 rules:
|
| 312 |
+
NP <ChunkRule: '<DT>? <JJ>* <NN>*'>
|
| 313 |
+
RegexpChunkParser with 1 rules:
|
| 314 |
+
Preposition <ChunkRule: '<IN>'>
|
| 315 |
+
RegexpChunkParser with 1 rules:
|
| 316 |
+
Verb <ChunkRule: '<V.*>'>
|
| 317 |
+
RegexpChunkParser with 1 rules:
|
| 318 |
+
PP -> P NP <ChunkRule: '<P> <NP>'>
|
| 319 |
+
RegexpChunkParser with 1 rules:
|
| 320 |
+
VP -> V (NP|PP)* <ChunkRule: '<V> <NP|PP>*'>
|
| 321 |
+
>>> print(parser.parse(unchunked_text, trace=True))
|
| 322 |
+
# Input:
|
| 323 |
+
<DT> <NN> <VBD> <IN> <DT> <NN> <DT> <NN> <VBD> <.>
|
| 324 |
+
# NP:
|
| 325 |
+
{<DT> <NN>} <VBD> <IN> {<DT> <NN>}{<DT> <NN>} <VBD> <.>
|
| 326 |
+
# Input:
|
| 327 |
+
<NP> <VBD> <IN> <NP> <NP> <VBD> <.>
|
| 328 |
+
# Preposition:
|
| 329 |
+
<NP> <VBD> {<IN>} <NP> <NP> <VBD> <.>
|
| 330 |
+
# Input:
|
| 331 |
+
<NP> <VBD> <P> <NP> <NP> <VBD> <.>
|
| 332 |
+
# Verb:
|
| 333 |
+
<NP> {<VBD>} <P> <NP> <NP> {<VBD>} <.>
|
| 334 |
+
# Input:
|
| 335 |
+
<NP> <V> <P> <NP> <NP> <V> <.>
|
| 336 |
+
# PP -> P NP:
|
| 337 |
+
<NP> <V> {<P> <NP>} <NP> <V> <.>
|
| 338 |
+
# Input:
|
| 339 |
+
<NP> <V> <PP> <NP> <V> <.>
|
| 340 |
+
# VP -> V (NP|PP)*:
|
| 341 |
+
<NP> {<V> <PP> <NP>}{<V>} <.>
|
| 342 |
+
(S
|
| 343 |
+
(NP The/DT cat/NN)
|
| 344 |
+
(VP
|
| 345 |
+
(V sat/VBD)
|
| 346 |
+
(PP (P on/IN) (NP the/DT mat/NN))
|
| 347 |
+
(NP the/DT dog/NN))
|
| 348 |
+
(VP (V chewed/VBD))
|
| 349 |
+
./.)
|
| 350 |
+
|
| 351 |
+
Test parsing of other rule types:
|
| 352 |
+
|
| 353 |
+
>>> print(RegexpParser('''
|
| 354 |
+
... X:
|
| 355 |
+
... }<a><b>{ # strip rule
|
| 356 |
+
... <a>}{<b> # split rule
|
| 357 |
+
... <a>{}<b> # merge rule
|
| 358 |
+
... <a>{<b>}<c> # chunk rule w/ context
|
| 359 |
+
... '''))
|
| 360 |
+
chunk.RegexpParser with 1 stages:
|
| 361 |
+
RegexpChunkParser with 4 rules:
|
| 362 |
+
strip rule <StripRule: '<a><b>'>
|
| 363 |
+
split rule <SplitRule: '<a>', '<b>'>
|
| 364 |
+
merge rule <MergeRule: '<a>', '<b>'>
|
| 365 |
+
chunk rule w/ context <ChunkRuleWithContext: '<a>', '<b>', '<c>'>
|
| 366 |
+
|
| 367 |
+
Illegal patterns give an error message:
|
| 368 |
+
|
| 369 |
+
>>> print(RegexpParser('X: {<foo>} {<bar>}'))
|
| 370 |
+
Traceback (most recent call last):
|
| 371 |
+
. . .
|
| 372 |
+
ValueError: Illegal chunk pattern: {<foo>} {<bar>}
|
openflamingo/lib/python3.10/site-packages/nltk/test/classify.doctest
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=============
|
| 5 |
+
Classifiers
|
| 6 |
+
=============
|
| 7 |
+
|
| 8 |
+
>>> from nltk.test.classify_fixt import setup_module
|
| 9 |
+
>>> setup_module()
|
| 10 |
+
|
| 11 |
+
Classifiers label tokens with category labels (or *class labels*).
|
| 12 |
+
Typically, labels are represented with strings (such as ``"health"``
|
| 13 |
+
or ``"sports"``. In NLTK, classifiers are defined using classes that
|
| 14 |
+
implement the `ClassifierI` interface, which supports the following operations:
|
| 15 |
+
|
| 16 |
+
- self.classify(featureset)
|
| 17 |
+
- self.classify_many(featuresets)
|
| 18 |
+
- self.labels()
|
| 19 |
+
- self.prob_classify(featureset)
|
| 20 |
+
- self.prob_classify_many(featuresets)
|
| 21 |
+
|
| 22 |
+
NLTK defines several classifier classes:
|
| 23 |
+
|
| 24 |
+
- `ConditionalExponentialClassifier`
|
| 25 |
+
- `DecisionTreeClassifier`
|
| 26 |
+
- `MaxentClassifier`
|
| 27 |
+
- `NaiveBayesClassifier`
|
| 28 |
+
- `WekaClassifier`
|
| 29 |
+
|
| 30 |
+
Classifiers are typically created by training them on a training
|
| 31 |
+
corpus.
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
Regression Tests
|
| 35 |
+
~~~~~~~~~~~~~~~~
|
| 36 |
+
|
| 37 |
+
We define a very simple training corpus with 3 binary features: ['a',
|
| 38 |
+
'b', 'c'], and are two labels: ['x', 'y']. We use a simple feature set so
|
| 39 |
+
that the correct answers can be calculated analytically (although we
|
| 40 |
+
haven't done this yet for all tests).
|
| 41 |
+
|
| 42 |
+
>>> import nltk
|
| 43 |
+
>>> train = [
|
| 44 |
+
... (dict(a=1,b=1,c=1), 'y'),
|
| 45 |
+
... (dict(a=1,b=1,c=1), 'x'),
|
| 46 |
+
... (dict(a=1,b=1,c=0), 'y'),
|
| 47 |
+
... (dict(a=0,b=1,c=1), 'x'),
|
| 48 |
+
... (dict(a=0,b=1,c=1), 'y'),
|
| 49 |
+
... (dict(a=0,b=0,c=1), 'y'),
|
| 50 |
+
... (dict(a=0,b=1,c=0), 'x'),
|
| 51 |
+
... (dict(a=0,b=0,c=0), 'x'),
|
| 52 |
+
... (dict(a=0,b=1,c=1), 'y'),
|
| 53 |
+
... (dict(a=None,b=1,c=0), 'x'),
|
| 54 |
+
... ]
|
| 55 |
+
>>> test = [
|
| 56 |
+
... (dict(a=1,b=0,c=1)), # unseen
|
| 57 |
+
... (dict(a=1,b=0,c=0)), # unseen
|
| 58 |
+
... (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x
|
| 59 |
+
... (dict(a=0,b=1,c=0)), # seen 1 time, label=x
|
| 60 |
+
... ]
|
| 61 |
+
|
| 62 |
+
Test the Naive Bayes classifier:
|
| 63 |
+
|
| 64 |
+
>>> classifier = nltk.classify.NaiveBayesClassifier.train(train)
|
| 65 |
+
>>> sorted(classifier.labels())
|
| 66 |
+
['x', 'y']
|
| 67 |
+
>>> classifier.classify_many(test)
|
| 68 |
+
['y', 'x', 'y', 'x']
|
| 69 |
+
>>> for pdist in classifier.prob_classify_many(test):
|
| 70 |
+
... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
|
| 71 |
+
0.2500 0.7500
|
| 72 |
+
0.5833 0.4167
|
| 73 |
+
0.3571 0.6429
|
| 74 |
+
0.7000 0.3000
|
| 75 |
+
>>> classifier.show_most_informative_features()
|
| 76 |
+
Most Informative Features
|
| 77 |
+
c = 0 x : y = 2.3 : 1.0
|
| 78 |
+
c = 1 y : x = 1.8 : 1.0
|
| 79 |
+
a = 1 y : x = 1.7 : 1.0
|
| 80 |
+
a = 0 x : y = 1.0 : 1.0
|
| 81 |
+
b = 0 x : y = 1.0 : 1.0
|
| 82 |
+
b = 1 x : y = 1.0 : 1.0
|
| 83 |
+
|
| 84 |
+
Test the Decision Tree classifier (without None):
|
| 85 |
+
|
| 86 |
+
>>> classifier = nltk.classify.DecisionTreeClassifier.train(
|
| 87 |
+
... train[:-1], entropy_cutoff=0,
|
| 88 |
+
... support_cutoff=0)
|
| 89 |
+
>>> sorted(classifier.labels())
|
| 90 |
+
['x', 'y']
|
| 91 |
+
>>> print(classifier)
|
| 92 |
+
c=0? .................................................. x
|
| 93 |
+
a=0? ................................................ x
|
| 94 |
+
a=1? ................................................ y
|
| 95 |
+
c=1? .................................................. y
|
| 96 |
+
<BLANKLINE>
|
| 97 |
+
>>> classifier.classify_many(test)
|
| 98 |
+
['y', 'y', 'y', 'x']
|
| 99 |
+
>>> for pdist in classifier.prob_classify_many(test):
|
| 100 |
+
... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y')))
|
| 101 |
+
Traceback (most recent call last):
|
| 102 |
+
. . .
|
| 103 |
+
NotImplementedError
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
Test the Decision Tree classifier (with None):
|
| 107 |
+
|
| 108 |
+
>>> classifier = nltk.classify.DecisionTreeClassifier.train(
|
| 109 |
+
... train, entropy_cutoff=0,
|
| 110 |
+
... support_cutoff=0)
|
| 111 |
+
>>> sorted(classifier.labels())
|
| 112 |
+
['x', 'y']
|
| 113 |
+
>>> print(classifier)
|
| 114 |
+
c=0? .................................................. x
|
| 115 |
+
a=0? ................................................ x
|
| 116 |
+
a=1? ................................................ y
|
| 117 |
+
a=None? ............................................. x
|
| 118 |
+
c=1? .................................................. y
|
| 119 |
+
<BLANKLINE>
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
Test SklearnClassifier, which requires the scikit-learn package.
|
| 123 |
+
|
| 124 |
+
>>> from nltk.classify import SklearnClassifier
|
| 125 |
+
>>> from sklearn.naive_bayes import BernoulliNB
|
| 126 |
+
>>> from sklearn.svm import SVC
|
| 127 |
+
>>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"),
|
| 128 |
+
... ({"a": 5, "b": 2, "c": 1}, "ham"),
|
| 129 |
+
... ({"a": 0, "b": 3, "c": 4}, "spam"),
|
| 130 |
+
... ({"a": 5, "b": 1, "c": 1}, "ham"),
|
| 131 |
+
... ({"a": 1, "b": 4, "c": 3}, "spam")]
|
| 132 |
+
>>> classif = SklearnClassifier(BernoulliNB()).train(train_data)
|
| 133 |
+
>>> test_data = [{"a": 3, "b": 2, "c": 1},
|
| 134 |
+
... {"a": 0, "b": 3, "c": 7}]
|
| 135 |
+
>>> classif.classify_many(test_data)
|
| 136 |
+
['ham', 'spam']
|
| 137 |
+
>>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data)
|
| 138 |
+
>>> classif.classify_many(test_data)
|
| 139 |
+
['ham', 'spam']
|
| 140 |
+
|
| 141 |
+
Test the Maximum Entropy classifier training algorithms; they should all
|
| 142 |
+
generate the same results.
|
| 143 |
+
|
| 144 |
+
>>> def print_maxent_test_header():
|
| 145 |
+
... print(' '*11+''.join([' test[%s] ' % i
|
| 146 |
+
... for i in range(len(test))]))
|
| 147 |
+
... print(' '*11+' p(x) p(y)'*len(test))
|
| 148 |
+
... print('-'*(11+15*len(test)))
|
| 149 |
+
|
| 150 |
+
>>> def test_maxent(algorithm):
|
| 151 |
+
... print('%11s' % algorithm, end=' ')
|
| 152 |
+
... try:
|
| 153 |
+
... classifier = nltk.classify.MaxentClassifier.train(
|
| 154 |
+
... train, algorithm, trace=0, max_iter=1000)
|
| 155 |
+
... except Exception as e:
|
| 156 |
+
... print('Error: %r' % e)
|
| 157 |
+
... return
|
| 158 |
+
...
|
| 159 |
+
... for featureset in test:
|
| 160 |
+
... pdist = classifier.prob_classify(featureset)
|
| 161 |
+
... print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ')
|
| 162 |
+
... print()
|
| 163 |
+
|
| 164 |
+
>>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS')
|
| 165 |
+
test[0] test[1] test[2] test[3]
|
| 166 |
+
p(x) p(y) p(x) p(y) p(x) p(y) p(x) p(y)
|
| 167 |
+
-----------------------------------------------------------------------
|
| 168 |
+
GIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
|
| 169 |
+
IIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
|
| 170 |
+
|
| 171 |
+
>>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP
|
| 172 |
+
MEGAM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
|
| 173 |
+
TADM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
Regression tests for TypedMaxentFeatureEncoding
|
| 178 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 179 |
+
|
| 180 |
+
>>> from nltk.classify import maxent
|
| 181 |
+
>>> train = [
|
| 182 |
+
... ({'a': 1, 'b': 1, 'c': 1}, 'y'),
|
| 183 |
+
... ({'a': 5, 'b': 5, 'c': 5}, 'x'),
|
| 184 |
+
... ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'),
|
| 185 |
+
... ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'),
|
| 186 |
+
... ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'),
|
| 187 |
+
... ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x')
|
| 188 |
+
... ]
|
| 189 |
+
|
| 190 |
+
>>> test = [
|
| 191 |
+
... {'a': 1, 'b': 0.8, 'c': 1.2},
|
| 192 |
+
... {'a': 5.2, 'b': 5.1, 'c': 5}
|
| 193 |
+
... ]
|
| 194 |
+
|
| 195 |
+
>>> encoding = maxent.TypedMaxentFeatureEncoding.train(
|
| 196 |
+
... train, count_cutoff=3, alwayson_features=True)
|
| 197 |
+
|
| 198 |
+
>>> classifier = maxent.MaxentClassifier.train(
|
| 199 |
+
... train, bernoulli=False, encoding=encoding, trace=0)
|
| 200 |
+
|
| 201 |
+
>>> classifier.classify_many(test)
|
| 202 |
+
['y', 'x']
|
openflamingo/lib/python3.10/site-packages/nltk/test/conftest.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
from nltk.corpus.reader import CorpusReader
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@pytest.fixture(autouse=True)
|
| 7 |
+
def mock_plot(mocker):
|
| 8 |
+
"""Disable matplotlib plotting in test code"""
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
|
| 13 |
+
mocker.patch.object(plt, "gca")
|
| 14 |
+
mocker.patch.object(plt, "show")
|
| 15 |
+
except ImportError:
|
| 16 |
+
pass
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
@pytest.fixture(scope="module", autouse=True)
|
| 20 |
+
def teardown_loaded_corpora():
|
| 21 |
+
"""
|
| 22 |
+
After each test session ends (either doctest or unit test),
|
| 23 |
+
unload any loaded corpora
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
yield # first, wait for the test to end
|
| 27 |
+
|
| 28 |
+
import nltk.corpus
|
| 29 |
+
|
| 30 |
+
for name in dir(nltk.corpus):
|
| 31 |
+
obj = getattr(nltk.corpus, name, None)
|
| 32 |
+
if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"):
|
| 33 |
+
obj._unload()
|
openflamingo/lib/python3.10/site-packages/nltk/test/corpus.doctest
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
openflamingo/lib/python3.10/site-packages/nltk/test/crubadan.doctest
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
Crubadan Corpus Reader
|
| 5 |
+
======================
|
| 6 |
+
|
| 7 |
+
Crubadan is an NLTK corpus reader for ngram files provided
|
| 8 |
+
by the Crubadan project. It supports several languages.
|
| 9 |
+
|
| 10 |
+
>>> from nltk.corpus import crubadan
|
| 11 |
+
>>> crubadan.langs()
|
| 12 |
+
['abk', 'abn',..., 'zpa', 'zul']
|
| 13 |
+
|
| 14 |
+
----------------------------------------
|
| 15 |
+
Language code mapping and helper methods
|
| 16 |
+
----------------------------------------
|
| 17 |
+
|
| 18 |
+
The web crawler that generates the 3-gram frequencies works at the
|
| 19 |
+
level of "writing systems" rather than languages. Writing systems
|
| 20 |
+
are assigned internal 2-3 letter codes that require mapping to the
|
| 21 |
+
standard ISO 639-3 codes. For more information, please refer to
|
| 22 |
+
the README in nltk_data/crubadan folder after installing it.
|
| 23 |
+
|
| 24 |
+
To translate ISO 639-3 codes to "Crubadan Code":
|
| 25 |
+
|
| 26 |
+
>>> crubadan.iso_to_crubadan('eng')
|
| 27 |
+
'en'
|
| 28 |
+
>>> crubadan.iso_to_crubadan('fra')
|
| 29 |
+
'fr'
|
| 30 |
+
>>> crubadan.iso_to_crubadan('aaa')
|
| 31 |
+
|
| 32 |
+
In reverse, print ISO 639-3 code if we have the Crubadan Code:
|
| 33 |
+
|
| 34 |
+
>>> crubadan.crubadan_to_iso('en')
|
| 35 |
+
'eng'
|
| 36 |
+
>>> crubadan.crubadan_to_iso('fr')
|
| 37 |
+
'fra'
|
| 38 |
+
>>> crubadan.crubadan_to_iso('aa')
|
| 39 |
+
|
| 40 |
+
---------------------------
|
| 41 |
+
Accessing ngram frequencies
|
| 42 |
+
---------------------------
|
| 43 |
+
|
| 44 |
+
On initialization the reader will create a dictionary of every
|
| 45 |
+
language supported by the Crubadan project, mapping the ISO 639-3
|
| 46 |
+
language code to its corresponding ngram frequency.
|
| 47 |
+
|
| 48 |
+
You can access individual language FreqDist and the ngrams within them as follows:
|
| 49 |
+
|
| 50 |
+
>>> english_fd = crubadan.lang_freq('eng')
|
| 51 |
+
>>> english_fd['the']
|
| 52 |
+
728135
|
| 53 |
+
|
| 54 |
+
Above accesses the FreqDist of English and returns the frequency of the ngram 'the'.
|
| 55 |
+
A ngram that isn't found within the language will return 0:
|
| 56 |
+
|
| 57 |
+
>>> english_fd['sometest']
|
| 58 |
+
0
|
| 59 |
+
|
| 60 |
+
A language that isn't supported will raise an exception:
|
| 61 |
+
|
| 62 |
+
>>> crubadan.lang_freq('elvish')
|
| 63 |
+
Traceback (most recent call last):
|
| 64 |
+
...
|
| 65 |
+
RuntimeError: Unsupported language.
|
openflamingo/lib/python3.10/site-packages/nltk/test/framenet.doctest
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
========
|
| 5 |
+
FrameNet
|
| 6 |
+
========
|
| 7 |
+
|
| 8 |
+
The FrameNet corpus is a lexical database of English that is both human-
|
| 9 |
+
and machine-readable, based on annotating examples of how words are used
|
| 10 |
+
in actual texts. FrameNet is based on a theory of meaning called Frame
|
| 11 |
+
Semantics, deriving from the work of Charles J. Fillmore and colleagues.
|
| 12 |
+
The basic idea is straightforward: that the meanings of most words can
|
| 13 |
+
best be understood on the basis of a semantic frame: a description of a
|
| 14 |
+
type of event, relation, or entity and the participants in it. For
|
| 15 |
+
example, the concept of cooking typically involves a person doing the
|
| 16 |
+
cooking (Cook), the food that is to be cooked (Food), something to hold
|
| 17 |
+
the food while cooking (Container) and a source of heat
|
| 18 |
+
(Heating_instrument). In the FrameNet project, this is represented as a
|
| 19 |
+
frame called Apply_heat, and the Cook, Food, Heating_instrument and
|
| 20 |
+
Container are called frame elements (FEs). Words that evoke this frame,
|
| 21 |
+
such as fry, bake, boil, and broil, are called lexical units (LUs) of
|
| 22 |
+
the Apply_heat frame. The job of FrameNet is to define the frames
|
| 23 |
+
and to annotate sentences to show how the FEs fit syntactically around
|
| 24 |
+
the word that evokes the frame.
|
| 25 |
+
|
| 26 |
+
------
|
| 27 |
+
Frames
|
| 28 |
+
------
|
| 29 |
+
|
| 30 |
+
A Frame is a script-like conceptual structure that describes a
|
| 31 |
+
particular type of situation, object, or event along with the
|
| 32 |
+
participants and props that are needed for that Frame. For
|
| 33 |
+
example, the "Apply_heat" frame describes a common situation
|
| 34 |
+
involving a Cook, some Food, and a Heating_Instrument, and is
|
| 35 |
+
evoked by words such as bake, blanch, boil, broil, brown,
|
| 36 |
+
simmer, steam, etc.
|
| 37 |
+
|
| 38 |
+
We call the roles of a Frame "frame elements" (FEs) and the
|
| 39 |
+
frame-evoking words are called "lexical units" (LUs).
|
| 40 |
+
|
| 41 |
+
FrameNet includes relations between Frames. Several types of
|
| 42 |
+
relations are defined, of which the most important are:
|
| 43 |
+
|
| 44 |
+
- Inheritance: An IS-A relation. The child frame is a subtype
|
| 45 |
+
of the parent frame, and each FE in the parent is bound to
|
| 46 |
+
a corresponding FE in the child. An example is the
|
| 47 |
+
"Revenge" frame which inherits from the
|
| 48 |
+
"Rewards_and_punishments" frame.
|
| 49 |
+
|
| 50 |
+
- Using: The child frame presupposes the parent frame as
|
| 51 |
+
background, e.g. the "Speed" frame "uses" (or presupposes)
|
| 52 |
+
the "Motion" frame; however, not all parent FEs need to be
|
| 53 |
+
bound to child FEs.
|
| 54 |
+
|
| 55 |
+
- Subframe: The child frame is a subevent of a complex event
|
| 56 |
+
represented by the parent, e.g. the "Criminal_process" frame
|
| 57 |
+
has subframes of "Arrest", "Arraignment", "Trial", and
|
| 58 |
+
"Sentencing".
|
| 59 |
+
|
| 60 |
+
- Perspective_on: The child frame provides a particular
|
| 61 |
+
perspective on an un-perspectivized parent frame. A pair of
|
| 62 |
+
examples consists of the "Hiring" and "Get_a_job" frames,
|
| 63 |
+
which perspectivize the "Employment_start" frame from the
|
| 64 |
+
Employer's and the Employee's point of view, respectively.
|
| 65 |
+
|
| 66 |
+
To get a list of all of the Frames in FrameNet, you can use the
|
| 67 |
+
`frames()` function. If you supply a regular expression pattern to the
|
| 68 |
+
`frames()` function, you will get a list of all Frames whose names match
|
| 69 |
+
that pattern:
|
| 70 |
+
|
| 71 |
+
>>> from pprint import pprint
|
| 72 |
+
>>> from operator import itemgetter
|
| 73 |
+
>>> from nltk.corpus import framenet as fn
|
| 74 |
+
>>> from nltk.corpus.reader.framenet import PrettyList
|
| 75 |
+
>>> x = fn.frames(r'(?i)crim')
|
| 76 |
+
>>> x.sort(key=itemgetter('ID'))
|
| 77 |
+
>>> x
|
| 78 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
| 79 |
+
>>> PrettyList(sorted(x, key=itemgetter('ID')))
|
| 80 |
+
[<frame ID=200 name=Criminal_process>, <frame ID=500 name=Criminal_investigation>, ...]
|
| 81 |
+
|
| 82 |
+
To get the details of a particular Frame, you can use the `frame()`
|
| 83 |
+
function passing in the frame number:
|
| 84 |
+
|
| 85 |
+
>>> from pprint import pprint
|
| 86 |
+
>>> from nltk.corpus import framenet as fn
|
| 87 |
+
>>> f = fn.frame(202)
|
| 88 |
+
>>> f.ID
|
| 89 |
+
202
|
| 90 |
+
>>> f.name
|
| 91 |
+
'Arrest'
|
| 92 |
+
>>> f.definition
|
| 93 |
+
"Authorities charge a Suspect, who is under suspicion of having committed a crime..."
|
| 94 |
+
>>> len(f.lexUnit)
|
| 95 |
+
11
|
| 96 |
+
>>> pprint(sorted([x for x in f.FE]))
|
| 97 |
+
['Authorities',
|
| 98 |
+
'Charges',
|
| 99 |
+
'Co-participant',
|
| 100 |
+
'Manner',
|
| 101 |
+
'Means',
|
| 102 |
+
'Offense',
|
| 103 |
+
'Place',
|
| 104 |
+
'Purpose',
|
| 105 |
+
'Source_of_legal_authority',
|
| 106 |
+
'Suspect',
|
| 107 |
+
'Time',
|
| 108 |
+
'Type']
|
| 109 |
+
>>> pprint(f.frameRelations)
|
| 110 |
+
[<Parent=Intentionally_affect -- Inheritance -> Child=Arrest>, <Complex=Criminal_process -- Subframe -> Component=Arrest>, ...]
|
| 111 |
+
|
| 112 |
+
The `frame()` function shown above returns a dict object containing
|
| 113 |
+
detailed information about the Frame. See the documentation on the
|
| 114 |
+
`frame()` function for the specifics.
|
| 115 |
+
|
| 116 |
+
You can also search for Frames by their Lexical Units (LUs). The
|
| 117 |
+
`frames_by_lemma()` function returns a list of all frames that contain
|
| 118 |
+
LUs in which the 'name' attribute of the LU matches the given regular
|
| 119 |
+
expression. Note that LU names are composed of "lemma.POS", where the
|
| 120 |
+
"lemma" part can be made up of either a single lexeme (e.g. 'run') or
|
| 121 |
+
multiple lexemes (e.g. 'a little') (see below).
|
| 122 |
+
|
| 123 |
+
>>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID')))
|
| 124 |
+
[<frame ID=189 name=Quanti...>, <frame ID=2001 name=Degree>]
|
| 125 |
+
|
| 126 |
+
-------------
|
| 127 |
+
Lexical Units
|
| 128 |
+
-------------
|
| 129 |
+
|
| 130 |
+
A lexical unit (LU) is a pairing of a word with a meaning. For
|
| 131 |
+
example, the "Apply_heat" Frame describes a common situation
|
| 132 |
+
involving a Cook, some Food, and a Heating Instrument, and is
|
| 133 |
+
_evoked_ by words such as bake, blanch, boil, broil, brown,
|
| 134 |
+
simmer, steam, etc. These frame-evoking words are the LUs in the
|
| 135 |
+
Apply_heat frame. Each sense of a polysemous word is a different
|
| 136 |
+
LU.
|
| 137 |
+
|
| 138 |
+
We have used the word "word" in talking about LUs. The reality
|
| 139 |
+
is actually rather complex. When we say that the word "bake" is
|
| 140 |
+
polysemous, we mean that the lemma "bake.v" (which has the
|
| 141 |
+
word-forms "bake", "bakes", "baked", and "baking") is linked to
|
| 142 |
+
three different frames:
|
| 143 |
+
|
| 144 |
+
- Apply_heat: "Michelle baked the potatoes for 45 minutes."
|
| 145 |
+
|
| 146 |
+
- Cooking_creation: "Michelle baked her mother a cake for her birthday."
|
| 147 |
+
|
| 148 |
+
- Absorb_heat: "The potatoes have to bake for more than 30 minutes."
|
| 149 |
+
|
| 150 |
+
These constitute three different LUs, with different
|
| 151 |
+
definitions.
|
| 152 |
+
|
| 153 |
+
Multiword expressions such as "given name" and hyphenated words
|
| 154 |
+
like "shut-eye" can also be LUs. Idiomatic phrases such as
|
| 155 |
+
"middle of nowhere" and "give the slip (to)" are also defined as
|
| 156 |
+
LUs in the appropriate frames ("Isolated_places" and "Evading",
|
| 157 |
+
respectively), and their internal structure is not analyzed.
|
| 158 |
+
|
| 159 |
+
Framenet provides multiple annotated examples of each sense of a
|
| 160 |
+
word (i.e. each LU). Moreover, the set of examples
|
| 161 |
+
(approximately 20 per LU) illustrates all of the combinatorial
|
| 162 |
+
possibilities of the lexical unit.
|
| 163 |
+
|
| 164 |
+
Each LU is linked to a Frame, and hence to the other words which
|
| 165 |
+
evoke that Frame. This makes the FrameNet database similar to a
|
| 166 |
+
thesaurus, grouping together semantically similar words.
|
| 167 |
+
|
| 168 |
+
In the simplest case, frame-evoking words are verbs such as
|
| 169 |
+
"fried" in:
|
| 170 |
+
|
| 171 |
+
"Matilde fried the catfish in a heavy iron skillet."
|
| 172 |
+
|
| 173 |
+
Sometimes event nouns may evoke a Frame. For example,
|
| 174 |
+
"reduction" evokes "Cause_change_of_scalar_position" in:
|
| 175 |
+
|
| 176 |
+
"...the reduction of debt levels to $665 million from $2.6 billion."
|
| 177 |
+
|
| 178 |
+
Adjectives may also evoke a Frame. For example, "asleep" may
|
| 179 |
+
evoke the "Sleep" frame as in:
|
| 180 |
+
|
| 181 |
+
"They were asleep for hours."
|
| 182 |
+
|
| 183 |
+
Many common nouns, such as artifacts like "hat" or "tower",
|
| 184 |
+
typically serve as dependents rather than clearly evoking their
|
| 185 |
+
own frames.
|
| 186 |
+
|
| 187 |
+
Details for a specific lexical unit can be obtained using this class's
|
| 188 |
+
`lus()` function, which takes an optional regular expression
|
| 189 |
+
pattern that will be matched against the name of the lexical unit:
|
| 190 |
+
|
| 191 |
+
>>> from pprint import pprint
|
| 192 |
+
>>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID')))
|
| 193 |
+
[<lu ID=14733 name=a little.n>, <lu ID=14743 name=a little.adv>, ...]
|
| 194 |
+
|
| 195 |
+
You can obtain detailed information on a particular LU by calling the
|
| 196 |
+
`lu()` function and passing in an LU's 'ID' number:
|
| 197 |
+
|
| 198 |
+
>>> from pprint import pprint
|
| 199 |
+
>>> from nltk.corpus import framenet as fn
|
| 200 |
+
>>> fn.lu(256).name
|
| 201 |
+
'foresee.v'
|
| 202 |
+
>>> fn.lu(256).definition
|
| 203 |
+
'COD: be aware of beforehand; predict.'
|
| 204 |
+
>>> fn.lu(256).frame.name
|
| 205 |
+
'Expectation'
|
| 206 |
+
>>> fn.lu(256).lexemes[0].name
|
| 207 |
+
'foresee'
|
| 208 |
+
|
| 209 |
+
Note that LU names take the form of a dotted string (e.g. "run.v" or "a
|
| 210 |
+
little.adv") in which a lemma precedes the "." and a part of speech
|
| 211 |
+
(POS) follows the dot. The lemma may be composed of a single lexeme
|
| 212 |
+
(e.g. "run") or of multiple lexemes (e.g. "a little"). The list of
|
| 213 |
+
POSs used in the LUs is:
|
| 214 |
+
|
| 215 |
+
v - verb
|
| 216 |
+
n - noun
|
| 217 |
+
a - adjective
|
| 218 |
+
adv - adverb
|
| 219 |
+
prep - preposition
|
| 220 |
+
num - numbers
|
| 221 |
+
intj - interjection
|
| 222 |
+
art - article
|
| 223 |
+
c - conjunction
|
| 224 |
+
scon - subordinating conjunction
|
| 225 |
+
|
| 226 |
+
For more detailed information about the info that is contained in the
|
| 227 |
+
dict that is returned by the `lu()` function, see the documentation on
|
| 228 |
+
the `lu()` function.
|
| 229 |
+
|
| 230 |
+
-------------------
|
| 231 |
+
Annotated Documents
|
| 232 |
+
-------------------
|
| 233 |
+
|
| 234 |
+
The FrameNet corpus contains a small set of annotated documents. A list
|
| 235 |
+
of these documents can be obtained by calling the `docs()` function:
|
| 236 |
+
|
| 237 |
+
>>> from pprint import pprint
|
| 238 |
+
>>> from nltk.corpus import framenet as fn
|
| 239 |
+
>>> d = fn.docs('BellRinging')[0]
|
| 240 |
+
>>> d.corpname
|
| 241 |
+
'PropBank'
|
| 242 |
+
>>> d.sentence[49]
|
| 243 |
+
full-text sentence (...) in BellRinging:
|
| 244 |
+
<BLANKLINE>
|
| 245 |
+
<BLANKLINE>
|
| 246 |
+
[POS] 17 tags
|
| 247 |
+
<BLANKLINE>
|
| 248 |
+
[POS_tagset] PENN
|
| 249 |
+
<BLANKLINE>
|
| 250 |
+
[text] + [annotationSet]
|
| 251 |
+
<BLANKLINE>
|
| 252 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
| 253 |
+
***** ******* *****
|
| 254 |
+
Desir Cause_t Cause
|
| 255 |
+
[1] [3] [2]
|
| 256 |
+
<BLANKLINE>
|
| 257 |
+
that fuller life .
|
| 258 |
+
******
|
| 259 |
+
Comple
|
| 260 |
+
[4]
|
| 261 |
+
(Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness)
|
| 262 |
+
<BLANKLINE>
|
| 263 |
+
|
| 264 |
+
>>> d.sentence[49].annotationSet[1]
|
| 265 |
+
annotation set (...):
|
| 266 |
+
<BLANKLINE>
|
| 267 |
+
[status] MANUAL
|
| 268 |
+
<BLANKLINE>
|
| 269 |
+
[LU] (6605) hope.n in Desiring
|
| 270 |
+
<BLANKLINE>
|
| 271 |
+
[frame] (366) Desiring
|
| 272 |
+
<BLANKLINE>
|
| 273 |
+
[GF] 2 relations
|
| 274 |
+
<BLANKLINE>
|
| 275 |
+
[PT] 2 phrases
|
| 276 |
+
<BLANKLINE>
|
| 277 |
+
[text] + [Target] + [FE] + [Noun]
|
| 278 |
+
<BLANKLINE>
|
| 279 |
+
`` I live in hopes that the ringers themselves will be drawn into
|
| 280 |
+
- ^^^^ ^^ ***** ----------------------------------------------
|
| 281 |
+
E supp su Event
|
| 282 |
+
<BLANKLINE>
|
| 283 |
+
that fuller life .
|
| 284 |
+
-----------------
|
| 285 |
+
<BLANKLINE>
|
| 286 |
+
(E=Experiencer, su=supp)
|
| 287 |
+
<BLANKLINE>
|
| 288 |
+
<BLANKLINE>
|
openflamingo/lib/python3.10/site-packages/nltk/test/generate.doctest
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
===============================================
|
| 5 |
+
Generating sentences from context-free grammars
|
| 6 |
+
===============================================
|
| 7 |
+
|
| 8 |
+
An example grammar:
|
| 9 |
+
|
| 10 |
+
>>> from nltk.parse.generate import generate, demo_grammar
|
| 11 |
+
>>> from nltk import CFG
|
| 12 |
+
>>> grammar = CFG.fromstring(demo_grammar)
|
| 13 |
+
>>> print(grammar)
|
| 14 |
+
Grammar with 13 productions (start state = S)
|
| 15 |
+
S -> NP VP
|
| 16 |
+
NP -> Det N
|
| 17 |
+
PP -> P NP
|
| 18 |
+
VP -> 'slept'
|
| 19 |
+
VP -> 'saw' NP
|
| 20 |
+
VP -> 'walked' PP
|
| 21 |
+
Det -> 'the'
|
| 22 |
+
Det -> 'a'
|
| 23 |
+
N -> 'man'
|
| 24 |
+
N -> 'park'
|
| 25 |
+
N -> 'dog'
|
| 26 |
+
P -> 'in'
|
| 27 |
+
P -> 'with'
|
| 28 |
+
|
| 29 |
+
The first 10 generated sentences:
|
| 30 |
+
|
| 31 |
+
>>> for sentence in generate(grammar, n=10):
|
| 32 |
+
... print(' '.join(sentence))
|
| 33 |
+
the man slept
|
| 34 |
+
the man saw the man
|
| 35 |
+
the man saw the park
|
| 36 |
+
the man saw the dog
|
| 37 |
+
the man saw a man
|
| 38 |
+
the man saw a park
|
| 39 |
+
the man saw a dog
|
| 40 |
+
the man walked in the man
|
| 41 |
+
the man walked in the park
|
| 42 |
+
the man walked in the dog
|
| 43 |
+
|
| 44 |
+
All sentences of max depth 4:
|
| 45 |
+
|
| 46 |
+
>>> for sentence in generate(grammar, depth=4):
|
| 47 |
+
... print(' '.join(sentence))
|
| 48 |
+
the man slept
|
| 49 |
+
the park slept
|
| 50 |
+
the dog slept
|
| 51 |
+
a man slept
|
| 52 |
+
a park slept
|
| 53 |
+
a dog slept
|
| 54 |
+
|
| 55 |
+
The number of sentences of different max depths:
|
| 56 |
+
|
| 57 |
+
>>> len(list(generate(grammar, depth=3)))
|
| 58 |
+
0
|
| 59 |
+
>>> len(list(generate(grammar, depth=4)))
|
| 60 |
+
6
|
| 61 |
+
>>> len(list(generate(grammar, depth=5)))
|
| 62 |
+
42
|
| 63 |
+
>>> len(list(generate(grammar, depth=6)))
|
| 64 |
+
114
|
| 65 |
+
>>> len(list(generate(grammar)))
|
| 66 |
+
114
|
| 67 |
+
|
| 68 |
+
Infinite grammars will raise a RuntimeError when not bounded by some ``depth``:
|
| 69 |
+
|
| 70 |
+
>>> grammar = CFG.fromstring("""
|
| 71 |
+
... S -> A B
|
| 72 |
+
... A -> B
|
| 73 |
+
... B -> "b" | A
|
| 74 |
+
... """)
|
| 75 |
+
>>> list(generate(grammar))
|
| 76 |
+
Traceback (most recent call last):
|
| 77 |
+
...
|
| 78 |
+
RuntimeError: The grammar has rule(s) that yield infinite recursion!
|
openflamingo/lib/python3.10/site-packages/nltk/test/logic.doctest
ADDED
|
@@ -0,0 +1,1096 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=======================
|
| 5 |
+
Logic & Lambda Calculus
|
| 6 |
+
=======================
|
| 7 |
+
|
| 8 |
+
The `nltk.logic` package allows expressions of First-Order Logic (FOL) to be
|
| 9 |
+
parsed into ``Expression`` objects. In addition to FOL, the parser
|
| 10 |
+
handles lambda-abstraction with variables of higher order.
|
| 11 |
+
|
| 12 |
+
--------
|
| 13 |
+
Overview
|
| 14 |
+
--------
|
| 15 |
+
|
| 16 |
+
>>> from nltk.sem.logic import *
|
| 17 |
+
|
| 18 |
+
The default inventory of logical constants is the following:
|
| 19 |
+
|
| 20 |
+
>>> boolean_ops()
|
| 21 |
+
negation -
|
| 22 |
+
conjunction &
|
| 23 |
+
disjunction |
|
| 24 |
+
implication ->
|
| 25 |
+
equivalence <->
|
| 26 |
+
>>> equality_preds()
|
| 27 |
+
equality =
|
| 28 |
+
inequality !=
|
| 29 |
+
>>> binding_ops()
|
| 30 |
+
existential exists
|
| 31 |
+
universal all
|
| 32 |
+
lambda \
|
| 33 |
+
|
| 34 |
+
----------------
|
| 35 |
+
Regression Tests
|
| 36 |
+
----------------
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
Untyped Logic
|
| 40 |
+
+++++++++++++
|
| 41 |
+
|
| 42 |
+
Process logical expressions conveniently:
|
| 43 |
+
|
| 44 |
+
>>> read_expr = Expression.fromstring
|
| 45 |
+
|
| 46 |
+
Test for equality under alpha-conversion
|
| 47 |
+
========================================
|
| 48 |
+
|
| 49 |
+
>>> e1 = read_expr('exists x.P(x)')
|
| 50 |
+
>>> print(e1)
|
| 51 |
+
exists x.P(x)
|
| 52 |
+
>>> e2 = e1.alpha_convert(Variable('z'))
|
| 53 |
+
>>> print(e2)
|
| 54 |
+
exists z.P(z)
|
| 55 |
+
>>> e1 == e2
|
| 56 |
+
True
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
>>> l = read_expr(r'\X.\X.X(X)(1)').simplify()
|
| 60 |
+
>>> id = read_expr(r'\X.X(X)')
|
| 61 |
+
>>> l == id
|
| 62 |
+
True
|
| 63 |
+
|
| 64 |
+
Test numerals
|
| 65 |
+
=============
|
| 66 |
+
|
| 67 |
+
>>> zero = read_expr(r'\F x.x')
|
| 68 |
+
>>> one = read_expr(r'\F x.F(x)')
|
| 69 |
+
>>> two = read_expr(r'\F x.F(F(x))')
|
| 70 |
+
>>> three = read_expr(r'\F x.F(F(F(x)))')
|
| 71 |
+
>>> four = read_expr(r'\F x.F(F(F(F(x))))')
|
| 72 |
+
>>> succ = read_expr(r'\N F x.F(N(F,x))')
|
| 73 |
+
>>> plus = read_expr(r'\M N F x.M(F,N(F,x))')
|
| 74 |
+
>>> mult = read_expr(r'\M N F.M(N(F))')
|
| 75 |
+
>>> pred = read_expr(r'\N F x.(N(\G H.H(G(F)))(\u.x)(\u.u))')
|
| 76 |
+
>>> v1 = ApplicationExpression(succ, zero).simplify()
|
| 77 |
+
>>> v1 == one
|
| 78 |
+
True
|
| 79 |
+
>>> v2 = ApplicationExpression(succ, v1).simplify()
|
| 80 |
+
>>> v2 == two
|
| 81 |
+
True
|
| 82 |
+
>>> v3 = ApplicationExpression(ApplicationExpression(plus, v1), v2).simplify()
|
| 83 |
+
>>> v3 == three
|
| 84 |
+
True
|
| 85 |
+
>>> v4 = ApplicationExpression(ApplicationExpression(mult, v2), v2).simplify()
|
| 86 |
+
>>> v4 == four
|
| 87 |
+
True
|
| 88 |
+
>>> v5 = ApplicationExpression(pred, ApplicationExpression(pred, v4)).simplify()
|
| 89 |
+
>>> v5 == two
|
| 90 |
+
True
|
| 91 |
+
|
| 92 |
+
Overloaded operators also exist, for convenience.
|
| 93 |
+
|
| 94 |
+
>>> print(succ(zero).simplify() == one)
|
| 95 |
+
True
|
| 96 |
+
>>> print(plus(one,two).simplify() == three)
|
| 97 |
+
True
|
| 98 |
+
>>> print(mult(two,two).simplify() == four)
|
| 99 |
+
True
|
| 100 |
+
>>> print(pred(pred(four)).simplify() == two)
|
| 101 |
+
True
|
| 102 |
+
|
| 103 |
+
>>> john = read_expr(r'john')
|
| 104 |
+
>>> man = read_expr(r'\x.man(x)')
|
| 105 |
+
>>> walk = read_expr(r'\x.walk(x)')
|
| 106 |
+
>>> man(john).simplify()
|
| 107 |
+
<ApplicationExpression man(john)>
|
| 108 |
+
>>> print(-walk(john).simplify())
|
| 109 |
+
-walk(john)
|
| 110 |
+
>>> print((man(john) & walk(john)).simplify())
|
| 111 |
+
(man(john) & walk(john))
|
| 112 |
+
>>> print((man(john) | walk(john)).simplify())
|
| 113 |
+
(man(john) | walk(john))
|
| 114 |
+
>>> print((man(john) > walk(john)).simplify())
|
| 115 |
+
(man(john) -> walk(john))
|
| 116 |
+
>>> print((man(john) < walk(john)).simplify())
|
| 117 |
+
(man(john) <-> walk(john))
|
| 118 |
+
|
| 119 |
+
Python's built-in lambda operator can also be used with Expressions
|
| 120 |
+
|
| 121 |
+
>>> john = VariableExpression(Variable('john'))
|
| 122 |
+
>>> run_var = VariableExpression(Variable('run'))
|
| 123 |
+
>>> run = lambda x: run_var(x)
|
| 124 |
+
>>> run(john)
|
| 125 |
+
<ApplicationExpression run(john)>
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
``betaConversionTestSuite.pl``
|
| 129 |
+
------------------------------
|
| 130 |
+
|
| 131 |
+
Tests based on Blackburn & Bos' book, *Representation and Inference
|
| 132 |
+
for Natural Language*.
|
| 133 |
+
|
| 134 |
+
>>> x1 = read_expr(r'\P.P(mia)(\x.walk(x))').simplify()
|
| 135 |
+
>>> x2 = read_expr(r'walk(mia)').simplify()
|
| 136 |
+
>>> x1 == x2
|
| 137 |
+
True
|
| 138 |
+
|
| 139 |
+
>>> x1 = read_expr(r'exists x.(man(x) & ((\P.exists x.(woman(x) & P(x)))(\y.love(x,y))))').simplify()
|
| 140 |
+
>>> x2 = read_expr(r'exists x.(man(x) & exists y.(woman(y) & love(x,y)))').simplify()
|
| 141 |
+
>>> x1 == x2
|
| 142 |
+
True
|
| 143 |
+
>>> x1 = read_expr(r'\a.sleep(a)(mia)').simplify()
|
| 144 |
+
>>> x2 = read_expr(r'sleep(mia)').simplify()
|
| 145 |
+
>>> x1 == x2
|
| 146 |
+
True
|
| 147 |
+
>>> x1 = read_expr(r'\a.\b.like(b,a)(mia)').simplify()
|
| 148 |
+
>>> x2 = read_expr(r'\b.like(b,mia)').simplify()
|
| 149 |
+
>>> x1 == x2
|
| 150 |
+
True
|
| 151 |
+
>>> x1 = read_expr(r'\a.(\b.like(b,a)(vincent))').simplify()
|
| 152 |
+
>>> x2 = read_expr(r'\a.like(vincent,a)').simplify()
|
| 153 |
+
>>> x1 == x2
|
| 154 |
+
True
|
| 155 |
+
>>> x1 = read_expr(r'\a.((\b.like(b,a)(vincent)) & sleep(a))').simplify()
|
| 156 |
+
>>> x2 = read_expr(r'\a.(like(vincent,a) & sleep(a))').simplify()
|
| 157 |
+
>>> x1 == x2
|
| 158 |
+
True
|
| 159 |
+
|
| 160 |
+
>>> x1 = read_expr(r'(\a.\b.like(b,a)(mia)(vincent))').simplify()
|
| 161 |
+
>>> x2 = read_expr(r'like(vincent,mia)').simplify()
|
| 162 |
+
>>> x1 == x2
|
| 163 |
+
True
|
| 164 |
+
|
| 165 |
+
>>> x1 = read_expr(r'P((\a.sleep(a)(vincent)))').simplify()
|
| 166 |
+
>>> x2 = read_expr(r'P(sleep(vincent))').simplify()
|
| 167 |
+
>>> x1 == x2
|
| 168 |
+
True
|
| 169 |
+
|
| 170 |
+
>>> x1 = read_expr(r'\A.A((\b.sleep(b)(vincent)))').simplify()
|
| 171 |
+
>>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
| 172 |
+
>>> x1 == x2
|
| 173 |
+
True
|
| 174 |
+
|
| 175 |
+
>>> x1 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
| 176 |
+
>>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify()
|
| 177 |
+
>>> x1 == x2
|
| 178 |
+
True
|
| 179 |
+
|
| 180 |
+
>>> x1 = read_expr(r'(\A.A(vincent)(\b.sleep(b)))').simplify()
|
| 181 |
+
>>> x2 = read_expr(r'sleep(vincent)').simplify()
|
| 182 |
+
>>> x1 == x2
|
| 183 |
+
True
|
| 184 |
+
|
| 185 |
+
>>> x1 = read_expr(r'\A.believe(mia,A(vincent))(\b.sleep(b))').simplify()
|
| 186 |
+
>>> x2 = read_expr(r'believe(mia,sleep(vincent))').simplify()
|
| 187 |
+
>>> x1 == x2
|
| 188 |
+
True
|
| 189 |
+
|
| 190 |
+
>>> x1 = read_expr(r'(\A.(A(vincent) & A(mia)))(\b.sleep(b))').simplify()
|
| 191 |
+
>>> x2 = read_expr(r'(sleep(vincent) & sleep(mia))').simplify()
|
| 192 |
+
>>> x1 == x2
|
| 193 |
+
True
|
| 194 |
+
|
| 195 |
+
>>> x1 = read_expr(r'\A.\B.(\C.C(A(vincent))(\d.probably(d)) & (\C.C(B(mia))(\d.improbably(d))))(\f.walk(f))(\f.talk(f))').simplify()
|
| 196 |
+
>>> x2 = read_expr(r'(probably(walk(vincent)) & improbably(talk(mia)))').simplify()
|
| 197 |
+
>>> x1 == x2
|
| 198 |
+
True
|
| 199 |
+
|
| 200 |
+
>>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\d.\f.love(d,f))))(jules)(mia)').simplify()
|
| 201 |
+
>>> x2 = read_expr(r'love(jules,mia)').simplify()
|
| 202 |
+
>>> x1 == x2
|
| 203 |
+
True
|
| 204 |
+
|
| 205 |
+
>>> x1 = read_expr(r'(\A.\B.exists c.(A(c) & B(c)))(\d.boxer(d),\d.sleep(d))').simplify()
|
| 206 |
+
>>> x2 = read_expr(r'exists c.(boxer(c) & sleep(c))').simplify()
|
| 207 |
+
>>> x1 == x2
|
| 208 |
+
True
|
| 209 |
+
|
| 210 |
+
>>> x1 = read_expr(r'\A.Z(A)(\c.\a.like(a,c))').simplify()
|
| 211 |
+
>>> x2 = read_expr(r'Z(\c.\a.like(a,c))').simplify()
|
| 212 |
+
>>> x1 == x2
|
| 213 |
+
True
|
| 214 |
+
|
| 215 |
+
>>> x1 = read_expr(r'\A.\b.A(b)(\c.\b.like(b,c))').simplify()
|
| 216 |
+
>>> x2 = read_expr(r'\b.(\c.\b.like(b,c)(b))').simplify()
|
| 217 |
+
>>> x1 == x2
|
| 218 |
+
True
|
| 219 |
+
|
| 220 |
+
>>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\b.\a.loves(b,a))))(jules)(mia)').simplify()
|
| 221 |
+
>>> x2 = read_expr(r'loves(jules,mia)').simplify()
|
| 222 |
+
>>> x1 == x2
|
| 223 |
+
True
|
| 224 |
+
|
| 225 |
+
>>> x1 = read_expr(r'(\A.\b.(exists b.A(b) & A(b)))(\c.boxer(c))(vincent)').simplify()
|
| 226 |
+
>>> x2 = read_expr(r'((exists b.boxer(b)) & boxer(vincent))').simplify()
|
| 227 |
+
>>> x1 == x2
|
| 228 |
+
True
|
| 229 |
+
|
| 230 |
+
Test Parser
|
| 231 |
+
===========
|
| 232 |
+
|
| 233 |
+
>>> print(read_expr(r'john'))
|
| 234 |
+
john
|
| 235 |
+
>>> print(read_expr(r'x'))
|
| 236 |
+
x
|
| 237 |
+
>>> print(read_expr(r'-man(x)'))
|
| 238 |
+
-man(x)
|
| 239 |
+
>>> print(read_expr(r'--man(x)'))
|
| 240 |
+
--man(x)
|
| 241 |
+
>>> print(read_expr(r'(man(x))'))
|
| 242 |
+
man(x)
|
| 243 |
+
>>> print(read_expr(r'((man(x)))'))
|
| 244 |
+
man(x)
|
| 245 |
+
>>> print(read_expr(r'man(x) <-> tall(x)'))
|
| 246 |
+
(man(x) <-> tall(x))
|
| 247 |
+
>>> print(read_expr(r'(man(x) <-> tall(x))'))
|
| 248 |
+
(man(x) <-> tall(x))
|
| 249 |
+
>>> print(read_expr(r'(man(x) & tall(x) & walks(x))'))
|
| 250 |
+
(man(x) & tall(x) & walks(x))
|
| 251 |
+
>>> print(read_expr(r'(man(x) & tall(x) & walks(x))').first)
|
| 252 |
+
(man(x) & tall(x))
|
| 253 |
+
>>> print(read_expr(r'man(x) | tall(x) & walks(x)'))
|
| 254 |
+
(man(x) | (tall(x) & walks(x)))
|
| 255 |
+
>>> print(read_expr(r'((man(x) & tall(x)) | walks(x))'))
|
| 256 |
+
((man(x) & tall(x)) | walks(x))
|
| 257 |
+
>>> print(read_expr(r'man(x) & (tall(x) | walks(x))'))
|
| 258 |
+
(man(x) & (tall(x) | walks(x)))
|
| 259 |
+
>>> print(read_expr(r'(man(x) & (tall(x) | walks(x)))'))
|
| 260 |
+
(man(x) & (tall(x) | walks(x)))
|
| 261 |
+
>>> print(read_expr(r'P(x) -> Q(x) <-> R(x) | S(x) & T(x)'))
|
| 262 |
+
((P(x) -> Q(x)) <-> (R(x) | (S(x) & T(x))))
|
| 263 |
+
>>> print(read_expr(r'exists x.man(x)'))
|
| 264 |
+
exists x.man(x)
|
| 265 |
+
>>> print(read_expr(r'exists x.(man(x) & tall(x))'))
|
| 266 |
+
exists x.(man(x) & tall(x))
|
| 267 |
+
>>> print(read_expr(r'exists x.(man(x) & tall(x) & walks(x))'))
|
| 268 |
+
exists x.(man(x) & tall(x) & walks(x))
|
| 269 |
+
>>> print(read_expr(r'-P(x) & Q(x)'))
|
| 270 |
+
(-P(x) & Q(x))
|
| 271 |
+
>>> read_expr(r'-P(x) & Q(x)') == read_expr(r'(-P(x)) & Q(x)')
|
| 272 |
+
True
|
| 273 |
+
>>> print(read_expr(r'\x.man(x)'))
|
| 274 |
+
\x.man(x)
|
| 275 |
+
>>> print(read_expr(r'\x.man(x)(john)'))
|
| 276 |
+
\x.man(x)(john)
|
| 277 |
+
>>> print(read_expr(r'\x.man(x)(john) & tall(x)'))
|
| 278 |
+
(\x.man(x)(john) & tall(x))
|
| 279 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)'))
|
| 280 |
+
\x y.sees(x,y)
|
| 281 |
+
>>> print(read_expr(r'\x y.sees(x,y)'))
|
| 282 |
+
\x y.sees(x,y)
|
| 283 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a)'))
|
| 284 |
+
(\x y.sees(x,y))(a)
|
| 285 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a)'))
|
| 286 |
+
(\x y.sees(x,y))(a)
|
| 287 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a)(b)'))
|
| 288 |
+
((\x y.sees(x,y))(a))(b)
|
| 289 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a)(b)'))
|
| 290 |
+
((\x y.sees(x,y))(a))(b)
|
| 291 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(a,b)'))
|
| 292 |
+
((\x y.sees(x,y))(a))(b)
|
| 293 |
+
>>> print(read_expr(r'\x y.sees(x,y)(a,b)'))
|
| 294 |
+
((\x y.sees(x,y))(a))(b)
|
| 295 |
+
>>> print(read_expr(r'((\x.\y.sees(x,y))(a))(b)'))
|
| 296 |
+
((\x y.sees(x,y))(a))(b)
|
| 297 |
+
>>> print(read_expr(r'P(x)(y)(z)'))
|
| 298 |
+
P(x,y,z)
|
| 299 |
+
>>> print(read_expr(r'P(Q)'))
|
| 300 |
+
P(Q)
|
| 301 |
+
>>> print(read_expr(r'P(Q(x))'))
|
| 302 |
+
P(Q(x))
|
| 303 |
+
>>> print(read_expr(r'(\x.exists y.walks(x,y))(x)'))
|
| 304 |
+
(\x.exists y.walks(x,y))(x)
|
| 305 |
+
>>> print(read_expr(r'exists x.(x = john)'))
|
| 306 |
+
exists x.(x = john)
|
| 307 |
+
>>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))'))
|
| 308 |
+
((\P Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))
|
| 309 |
+
>>> a = read_expr(r'exists c.exists b.A(b,c) & A(b,c)')
|
| 310 |
+
>>> b = read_expr(r'(exists c.(exists b.A(b,c))) & A(b,c)')
|
| 311 |
+
>>> print(a == b)
|
| 312 |
+
True
|
| 313 |
+
>>> a = read_expr(r'exists c.(exists b.A(b,c) & A(b,c))')
|
| 314 |
+
>>> b = read_expr(r'exists c.((exists b.A(b,c)) & A(b,c))')
|
| 315 |
+
>>> print(a == b)
|
| 316 |
+
True
|
| 317 |
+
>>> print(read_expr(r'exists x.x = y'))
|
| 318 |
+
exists x.(x = y)
|
| 319 |
+
>>> print(read_expr('A(B)(C)'))
|
| 320 |
+
A(B,C)
|
| 321 |
+
>>> print(read_expr('(A(B))(C)'))
|
| 322 |
+
A(B,C)
|
| 323 |
+
>>> print(read_expr('A((B)(C))'))
|
| 324 |
+
A(B(C))
|
| 325 |
+
>>> print(read_expr('A(B(C))'))
|
| 326 |
+
A(B(C))
|
| 327 |
+
>>> print(read_expr('(A)(B(C))'))
|
| 328 |
+
A(B(C))
|
| 329 |
+
>>> print(read_expr('(((A)))(((B))(((C))))'))
|
| 330 |
+
A(B(C))
|
| 331 |
+
>>> print(read_expr(r'A != B'))
|
| 332 |
+
-(A = B)
|
| 333 |
+
>>> print(read_expr('P(x) & x=y & P(y)'))
|
| 334 |
+
(P(x) & (x = y) & P(y))
|
| 335 |
+
>>> try: print(read_expr(r'\walk.walk(x)'))
|
| 336 |
+
... except LogicalExpressionException as e: print(e)
|
| 337 |
+
'walk' is an illegal variable name. Constants may not be abstracted.
|
| 338 |
+
\walk.walk(x)
|
| 339 |
+
^
|
| 340 |
+
>>> try: print(read_expr(r'all walk.walk(john)'))
|
| 341 |
+
... except LogicalExpressionException as e: print(e)
|
| 342 |
+
'walk' is an illegal variable name. Constants may not be quantified.
|
| 343 |
+
all walk.walk(john)
|
| 344 |
+
^
|
| 345 |
+
>>> try: print(read_expr(r'x(john)'))
|
| 346 |
+
... except LogicalExpressionException as e: print(e)
|
| 347 |
+
'x' is an illegal predicate name. Individual variables may not be used as predicates.
|
| 348 |
+
x(john)
|
| 349 |
+
^
|
| 350 |
+
|
| 351 |
+
>>> from nltk.sem.logic import LogicParser # hack to give access to custom quote chars
|
| 352 |
+
>>> lpq = LogicParser()
|
| 353 |
+
>>> lpq.quote_chars = [("'", "'", "\\", False)]
|
| 354 |
+
>>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
|
| 355 |
+
(man(x) & tall's,(x) & walks(x))
|
| 356 |
+
>>> lpq.quote_chars = [("'", "'", "\\", True)]
|
| 357 |
+
>>> print(lpq.parse(r"'tall\'s,'"))
|
| 358 |
+
'tall\'s,'
|
| 359 |
+
>>> print(lpq.parse(r"'spaced name(x)'"))
|
| 360 |
+
'spaced name(x)'
|
| 361 |
+
>>> print(lpq.parse(r"-'tall\'s,'(x)"))
|
| 362 |
+
-'tall\'s,'(x)
|
| 363 |
+
>>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )"))
|
| 364 |
+
(man(x) & 'tall\'s,'(x) & walks(x))
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
Simplify
|
| 368 |
+
========
|
| 369 |
+
|
| 370 |
+
>>> print(read_expr(r'\x.man(x)(john)').simplify())
|
| 371 |
+
man(john)
|
| 372 |
+
>>> print(read_expr(r'\x.((man(x)))(john)').simplify())
|
| 373 |
+
man(john)
|
| 374 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john, mary)').simplify())
|
| 375 |
+
sees(john,mary)
|
| 376 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john, mary)').simplify())
|
| 377 |
+
sees(john,mary)
|
| 378 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john)(mary)').simplify())
|
| 379 |
+
sees(john,mary)
|
| 380 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john)(mary)').simplify())
|
| 381 |
+
sees(john,mary)
|
| 382 |
+
>>> print(read_expr(r'\x.\y.sees(x,y)(john)').simplify())
|
| 383 |
+
\y.sees(john,y)
|
| 384 |
+
>>> print(read_expr(r'\x y.sees(x,y)(john)').simplify())
|
| 385 |
+
\y.sees(john,y)
|
| 386 |
+
>>> print(read_expr(r'(\x.\y.sees(x,y)(john))(mary)').simplify())
|
| 387 |
+
sees(john,mary)
|
| 388 |
+
>>> print(read_expr(r'(\x y.sees(x,y)(john))(mary)').simplify())
|
| 389 |
+
sees(john,mary)
|
| 390 |
+
>>> print(read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify())
|
| 391 |
+
exists x.(man(x) & exists y.walks(x,y))
|
| 392 |
+
>>> e1 = read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(y))').simplify()
|
| 393 |
+
>>> e2 = read_expr(r'exists x.(man(x) & exists z1.walks(y,z1))')
|
| 394 |
+
>>> e1 == e2
|
| 395 |
+
True
|
| 396 |
+
>>> print(read_expr(r'(\P Q.exists x.(P(x) & Q(x)))(\x.dog(x))').simplify())
|
| 397 |
+
\Q.exists x.(dog(x) & Q(x))
|
| 398 |
+
>>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))').simplify())
|
| 399 |
+
exists x.(dog(x) & bark(x))
|
| 400 |
+
>>> print(read_expr(r'\P.(P(x)(y))(\a b.Q(a,b))').simplify())
|
| 401 |
+
Q(x,y)
|
| 402 |
+
|
| 403 |
+
Replace
|
| 404 |
+
=======
|
| 405 |
+
|
| 406 |
+
>>> a = read_expr(r'a')
|
| 407 |
+
>>> x = read_expr(r'x')
|
| 408 |
+
>>> y = read_expr(r'y')
|
| 409 |
+
>>> z = read_expr(r'z')
|
| 410 |
+
|
| 411 |
+
>>> print(read_expr(r'man(x)').replace(x.variable, a, False))
|
| 412 |
+
man(a)
|
| 413 |
+
>>> print(read_expr(r'(man(x) & tall(x))').replace(x.variable, a, False))
|
| 414 |
+
(man(a) & tall(a))
|
| 415 |
+
>>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, False))
|
| 416 |
+
exists x.man(x)
|
| 417 |
+
>>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, True))
|
| 418 |
+
exists a.man(a)
|
| 419 |
+
>>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, False))
|
| 420 |
+
exists x.give(x,a,z)
|
| 421 |
+
>>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, True))
|
| 422 |
+
exists x.give(x,a,z)
|
| 423 |
+
>>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, False)
|
| 424 |
+
>>> e2 = read_expr(r'exists z1.give(z1,x,z)')
|
| 425 |
+
>>> e1 == e2
|
| 426 |
+
True
|
| 427 |
+
>>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, True)
|
| 428 |
+
>>> e2 = read_expr(r'exists z1.give(z1,x,z)')
|
| 429 |
+
>>> e1 == e2
|
| 430 |
+
True
|
| 431 |
+
>>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, False))
|
| 432 |
+
\x y z.give(x,y,z)
|
| 433 |
+
>>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, True))
|
| 434 |
+
\x a z.give(x,a,z)
|
| 435 |
+
>>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, False))
|
| 436 |
+
\x y.give(x,y,a)
|
| 437 |
+
>>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, True))
|
| 438 |
+
\x y.give(x,y,a)
|
| 439 |
+
>>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, False)
|
| 440 |
+
>>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
|
| 441 |
+
>>> e1 == e2
|
| 442 |
+
True
|
| 443 |
+
>>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, True)
|
| 444 |
+
>>> e2 = read_expr(r'\z1.\y.give(z1,y,x)')
|
| 445 |
+
>>> e1 == e2
|
| 446 |
+
True
|
| 447 |
+
>>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, False))
|
| 448 |
+
\x.give(x,y,y)
|
| 449 |
+
>>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, True))
|
| 450 |
+
\x.give(x,y,y)
|
| 451 |
+
|
| 452 |
+
>>> from nltk.sem import logic
|
| 453 |
+
>>> logic._counter._value = 0
|
| 454 |
+
>>> e1 = read_expr('e1')
|
| 455 |
+
>>> e2 = read_expr('e2')
|
| 456 |
+
>>> print(read_expr('exists e1 e2.(walk(e1) & talk(e2))').replace(e1.variable, e2, True))
|
| 457 |
+
exists e2 e01.(walk(e2) & talk(e01))
|
| 458 |
+
|
| 459 |
+
|
| 460 |
+
Variables / Free
|
| 461 |
+
================
|
| 462 |
+
|
| 463 |
+
>>> examples = [r'walk(john)',
|
| 464 |
+
... r'walk(x)',
|
| 465 |
+
... r'?vp(?np)',
|
| 466 |
+
... r'see(john,mary)',
|
| 467 |
+
... r'exists x.walk(x)',
|
| 468 |
+
... r'\x.see(john,x)',
|
| 469 |
+
... r'\x.see(john,x)(mary)',
|
| 470 |
+
... r'P(x)',
|
| 471 |
+
... r'\P.P(x)',
|
| 472 |
+
... r'aa(x,bb(y),cc(z),P(w),u)',
|
| 473 |
+
... r'bo(?det(?n),@x)']
|
| 474 |
+
>>> examples = [read_expr(e) for e in examples]
|
| 475 |
+
|
| 476 |
+
>>> for e in examples:
|
| 477 |
+
... print('%-25s' % e, sorted(e.free()))
|
| 478 |
+
walk(john) []
|
| 479 |
+
walk(x) [Variable('x')]
|
| 480 |
+
?vp(?np) []
|
| 481 |
+
see(john,mary) []
|
| 482 |
+
exists x.walk(x) []
|
| 483 |
+
\x.see(john,x) []
|
| 484 |
+
(\x.see(john,x))(mary) []
|
| 485 |
+
P(x) [Variable('P'), Variable('x')]
|
| 486 |
+
\P.P(x) [Variable('x')]
|
| 487 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
|
| 488 |
+
bo(?det(?n),@x) []
|
| 489 |
+
|
| 490 |
+
>>> for e in examples:
|
| 491 |
+
... print('%-25s' % e, sorted(e.constants()))
|
| 492 |
+
walk(john) [Variable('john')]
|
| 493 |
+
walk(x) []
|
| 494 |
+
?vp(?np) [Variable('?np')]
|
| 495 |
+
see(john,mary) [Variable('john'), Variable('mary')]
|
| 496 |
+
exists x.walk(x) []
|
| 497 |
+
\x.see(john,x) [Variable('john')]
|
| 498 |
+
(\x.see(john,x))(mary) [Variable('john'), Variable('mary')]
|
| 499 |
+
P(x) []
|
| 500 |
+
\P.P(x) []
|
| 501 |
+
aa(x,bb(y),cc(z),P(w),u) []
|
| 502 |
+
bo(?det(?n),@x) [Variable('?n'), Variable('@x')]
|
| 503 |
+
|
| 504 |
+
>>> for e in examples:
|
| 505 |
+
... print('%-25s' % e, sorted(e.predicates()))
|
| 506 |
+
walk(john) [Variable('walk')]
|
| 507 |
+
walk(x) [Variable('walk')]
|
| 508 |
+
?vp(?np) [Variable('?vp')]
|
| 509 |
+
see(john,mary) [Variable('see')]
|
| 510 |
+
exists x.walk(x) [Variable('walk')]
|
| 511 |
+
\x.see(john,x) [Variable('see')]
|
| 512 |
+
(\x.see(john,x))(mary) [Variable('see')]
|
| 513 |
+
P(x) []
|
| 514 |
+
\P.P(x) []
|
| 515 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('aa'), Variable('bb'), Variable('cc')]
|
| 516 |
+
bo(?det(?n),@x) [Variable('?det'), Variable('bo')]
|
| 517 |
+
|
| 518 |
+
>>> for e in examples:
|
| 519 |
+
... print('%-25s' % e, sorted(e.variables()))
|
| 520 |
+
walk(john) []
|
| 521 |
+
walk(x) [Variable('x')]
|
| 522 |
+
?vp(?np) [Variable('?np'), Variable('?vp')]
|
| 523 |
+
see(john,mary) []
|
| 524 |
+
exists x.walk(x) []
|
| 525 |
+
\x.see(john,x) []
|
| 526 |
+
(\x.see(john,x))(mary) []
|
| 527 |
+
P(x) [Variable('P'), Variable('x')]
|
| 528 |
+
\P.P(x) [Variable('x')]
|
| 529 |
+
aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')]
|
| 530 |
+
bo(?det(?n),@x) [Variable('?det'), Variable('?n'), Variable('@x')]
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
`normalize`
|
| 535 |
+
>>> print(read_expr(r'\e083.(walk(e083, z472) & talk(e092, z938))').normalize())
|
| 536 |
+
\e01.(walk(e01,z3) & talk(e02,z4))
|
| 537 |
+
|
| 538 |
+
Typed Logic
|
| 539 |
+
+++++++++++
|
| 540 |
+
|
| 541 |
+
>>> from nltk.sem.logic import LogicParser
|
| 542 |
+
>>> tlp = LogicParser(True)
|
| 543 |
+
>>> print(tlp.parse(r'man(x)').type)
|
| 544 |
+
?
|
| 545 |
+
>>> print(tlp.parse(r'walk(angus)').type)
|
| 546 |
+
?
|
| 547 |
+
>>> print(tlp.parse(r'-man(x)').type)
|
| 548 |
+
t
|
| 549 |
+
>>> print(tlp.parse(r'(man(x) <-> tall(x))').type)
|
| 550 |
+
t
|
| 551 |
+
>>> print(tlp.parse(r'exists x.(man(x) & tall(x))').type)
|
| 552 |
+
t
|
| 553 |
+
>>> print(tlp.parse(r'\x.man(x)').type)
|
| 554 |
+
<e,?>
|
| 555 |
+
>>> print(tlp.parse(r'john').type)
|
| 556 |
+
e
|
| 557 |
+
>>> print(tlp.parse(r'\x y.sees(x,y)').type)
|
| 558 |
+
<e,<e,?>>
|
| 559 |
+
>>> print(tlp.parse(r'\x.man(x)(john)').type)
|
| 560 |
+
?
|
| 561 |
+
>>> print(tlp.parse(r'\x.\y.sees(x,y)(john)').type)
|
| 562 |
+
<e,?>
|
| 563 |
+
>>> print(tlp.parse(r'\x.\y.sees(x,y)(john)(mary)').type)
|
| 564 |
+
?
|
| 565 |
+
>>> print(tlp.parse(r'\P.\Q.exists x.(P(x) & Q(x))').type)
|
| 566 |
+
<<e,t>,<<e,t>,t>>
|
| 567 |
+
>>> print(tlp.parse(r'\x.y').type)
|
| 568 |
+
<?,e>
|
| 569 |
+
>>> print(tlp.parse(r'\P.P(x)').type)
|
| 570 |
+
<<e,?>,?>
|
| 571 |
+
|
| 572 |
+
>>> parsed = tlp.parse('see(john,mary)')
|
| 573 |
+
>>> print(parsed.type)
|
| 574 |
+
?
|
| 575 |
+
>>> print(parsed.function)
|
| 576 |
+
see(john)
|
| 577 |
+
>>> print(parsed.function.type)
|
| 578 |
+
<e,?>
|
| 579 |
+
>>> print(parsed.function.function)
|
| 580 |
+
see
|
| 581 |
+
>>> print(parsed.function.function.type)
|
| 582 |
+
<e,<e,?>>
|
| 583 |
+
|
| 584 |
+
>>> parsed = tlp.parse('P(x,y)')
|
| 585 |
+
>>> print(parsed)
|
| 586 |
+
P(x,y)
|
| 587 |
+
>>> print(parsed.type)
|
| 588 |
+
?
|
| 589 |
+
>>> print(parsed.function)
|
| 590 |
+
P(x)
|
| 591 |
+
>>> print(parsed.function.type)
|
| 592 |
+
<e,?>
|
| 593 |
+
>>> print(parsed.function.function)
|
| 594 |
+
P
|
| 595 |
+
>>> print(parsed.function.function.type)
|
| 596 |
+
<e,<e,?>>
|
| 597 |
+
|
| 598 |
+
>>> print(tlp.parse(r'P').type)
|
| 599 |
+
?
|
| 600 |
+
|
| 601 |
+
>>> print(tlp.parse(r'P', {'P': 't'}).type)
|
| 602 |
+
t
|
| 603 |
+
|
| 604 |
+
>>> a = tlp.parse(r'P(x)')
|
| 605 |
+
>>> print(a.type)
|
| 606 |
+
?
|
| 607 |
+
>>> print(a.function.type)
|
| 608 |
+
<e,?>
|
| 609 |
+
>>> print(a.argument.type)
|
| 610 |
+
e
|
| 611 |
+
|
| 612 |
+
>>> a = tlp.parse(r'-P(x)')
|
| 613 |
+
>>> print(a.type)
|
| 614 |
+
t
|
| 615 |
+
>>> print(a.term.type)
|
| 616 |
+
t
|
| 617 |
+
>>> print(a.term.function.type)
|
| 618 |
+
<e,t>
|
| 619 |
+
>>> print(a.term.argument.type)
|
| 620 |
+
e
|
| 621 |
+
|
| 622 |
+
>>> a = tlp.parse(r'P & Q')
|
| 623 |
+
>>> print(a.type)
|
| 624 |
+
t
|
| 625 |
+
>>> print(a.first.type)
|
| 626 |
+
t
|
| 627 |
+
>>> print(a.second.type)
|
| 628 |
+
t
|
| 629 |
+
|
| 630 |
+
>>> a = tlp.parse(r'(P(x) & Q(x))')
|
| 631 |
+
>>> print(a.type)
|
| 632 |
+
t
|
| 633 |
+
>>> print(a.first.type)
|
| 634 |
+
t
|
| 635 |
+
>>> print(a.first.function.type)
|
| 636 |
+
<e,t>
|
| 637 |
+
>>> print(a.first.argument.type)
|
| 638 |
+
e
|
| 639 |
+
>>> print(a.second.type)
|
| 640 |
+
t
|
| 641 |
+
>>> print(a.second.function.type)
|
| 642 |
+
<e,t>
|
| 643 |
+
>>> print(a.second.argument.type)
|
| 644 |
+
e
|
| 645 |
+
|
| 646 |
+
>>> a = tlp.parse(r'\x.P(x)')
|
| 647 |
+
>>> print(a.type)
|
| 648 |
+
<e,?>
|
| 649 |
+
>>> print(a.term.function.type)
|
| 650 |
+
<e,?>
|
| 651 |
+
>>> print(a.term.argument.type)
|
| 652 |
+
e
|
| 653 |
+
|
| 654 |
+
>>> a = tlp.parse(r'\P.P(x)')
|
| 655 |
+
>>> print(a.type)
|
| 656 |
+
<<e,?>,?>
|
| 657 |
+
>>> print(a.term.function.type)
|
| 658 |
+
<e,?>
|
| 659 |
+
>>> print(a.term.argument.type)
|
| 660 |
+
e
|
| 661 |
+
|
| 662 |
+
>>> a = tlp.parse(r'(\x.P(x)(john)) & Q(x)')
|
| 663 |
+
>>> print(a.type)
|
| 664 |
+
t
|
| 665 |
+
>>> print(a.first.type)
|
| 666 |
+
t
|
| 667 |
+
>>> print(a.first.function.type)
|
| 668 |
+
<e,t>
|
| 669 |
+
>>> print(a.first.function.term.function.type)
|
| 670 |
+
<e,t>
|
| 671 |
+
>>> print(a.first.function.term.argument.type)
|
| 672 |
+
e
|
| 673 |
+
>>> print(a.first.argument.type)
|
| 674 |
+
e
|
| 675 |
+
|
| 676 |
+
>>> a = tlp.parse(r'\x y.P(x,y)(john)(mary) & Q(x)')
|
| 677 |
+
>>> print(a.type)
|
| 678 |
+
t
|
| 679 |
+
>>> print(a.first.type)
|
| 680 |
+
t
|
| 681 |
+
>>> print(a.first.function.type)
|
| 682 |
+
<e,t>
|
| 683 |
+
>>> print(a.first.function.function.type)
|
| 684 |
+
<e,<e,t>>
|
| 685 |
+
|
| 686 |
+
>>> a = tlp.parse(r'--P')
|
| 687 |
+
>>> print(a.type)
|
| 688 |
+
t
|
| 689 |
+
>>> print(a.term.type)
|
| 690 |
+
t
|
| 691 |
+
>>> print(a.term.term.type)
|
| 692 |
+
t
|
| 693 |
+
|
| 694 |
+
>>> tlp.parse(r'\x y.P(x,y)').type
|
| 695 |
+
<e,<e,?>>
|
| 696 |
+
>>> tlp.parse(r'\x y.P(x,y)', {'P': '<e,<e,t>>'}).type
|
| 697 |
+
<e,<e,t>>
|
| 698 |
+
|
| 699 |
+
>>> a = tlp.parse(r'\P y.P(john,y)(\x y.see(x,y))')
|
| 700 |
+
>>> a.type
|
| 701 |
+
<e,?>
|
| 702 |
+
>>> a.function.type
|
| 703 |
+
<<e,<e,?>>,<e,?>>
|
| 704 |
+
>>> a.function.term.term.function.function.type
|
| 705 |
+
<e,<e,?>>
|
| 706 |
+
>>> a.argument.type
|
| 707 |
+
<e,<e,?>>
|
| 708 |
+
|
| 709 |
+
>>> a = tlp.parse(r'exists c f.(father(c) = f)')
|
| 710 |
+
>>> a.type
|
| 711 |
+
t
|
| 712 |
+
>>> a.term.term.type
|
| 713 |
+
t
|
| 714 |
+
>>> a.term.term.first.type
|
| 715 |
+
e
|
| 716 |
+
>>> a.term.term.first.function.type
|
| 717 |
+
<e,e>
|
| 718 |
+
>>> a.term.term.second.type
|
| 719 |
+
e
|
| 720 |
+
|
| 721 |
+
typecheck()
|
| 722 |
+
|
| 723 |
+
>>> a = tlp.parse('P(x)')
|
| 724 |
+
>>> b = tlp.parse('Q(x)')
|
| 725 |
+
>>> a.type
|
| 726 |
+
?
|
| 727 |
+
>>> c = a & b
|
| 728 |
+
>>> c.first.type
|
| 729 |
+
?
|
| 730 |
+
>>> c.typecheck()
|
| 731 |
+
{...}
|
| 732 |
+
>>> c.first.type
|
| 733 |
+
t
|
| 734 |
+
|
| 735 |
+
>>> a = tlp.parse('P(x)')
|
| 736 |
+
>>> b = tlp.parse('P(x) & Q(x)')
|
| 737 |
+
>>> a.type
|
| 738 |
+
?
|
| 739 |
+
>>> typecheck([a,b])
|
| 740 |
+
{...}
|
| 741 |
+
>>> a.type
|
| 742 |
+
t
|
| 743 |
+
|
| 744 |
+
>>> e = tlp.parse(r'man(x)')
|
| 745 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,?>'})
|
| 746 |
+
True
|
| 747 |
+
>>> sig = {'man': '<e, t>'}
|
| 748 |
+
>>> e = tlp.parse(r'man(x)', sig)
|
| 749 |
+
>>> print(e.function.type)
|
| 750 |
+
<e,t>
|
| 751 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': '<e,t>'})
|
| 752 |
+
True
|
| 753 |
+
>>> print(e.function.type)
|
| 754 |
+
<e,t>
|
| 755 |
+
>>> print(dict((k,str(v)) for k,v in e.typecheck(sig).items()) == {'x': 'e', 'man': '<e,t>'})
|
| 756 |
+
True
|
| 757 |
+
|
| 758 |
+
findtype()
|
| 759 |
+
|
| 760 |
+
>>> print(tlp.parse(r'man(x)').findtype(Variable('man')))
|
| 761 |
+
<e,?>
|
| 762 |
+
>>> print(tlp.parse(r'see(x,y)').findtype(Variable('see')))
|
| 763 |
+
<e,<e,?>>
|
| 764 |
+
>>> print(tlp.parse(r'P(Q(R(x)))').findtype(Variable('Q')))
|
| 765 |
+
?
|
| 766 |
+
|
| 767 |
+
reading types from strings
|
| 768 |
+
|
| 769 |
+
>>> Type.fromstring('e')
|
| 770 |
+
e
|
| 771 |
+
>>> Type.fromstring('<e,t>')
|
| 772 |
+
<e,t>
|
| 773 |
+
>>> Type.fromstring('<<e,t>,<e,t>>')
|
| 774 |
+
<<e,t>,<e,t>>
|
| 775 |
+
>>> Type.fromstring('<<e,?>,?>')
|
| 776 |
+
<<e,?>,?>
|
| 777 |
+
|
| 778 |
+
alternative type format
|
| 779 |
+
|
| 780 |
+
>>> Type.fromstring('e').str()
|
| 781 |
+
'IND'
|
| 782 |
+
>>> Type.fromstring('<e,?>').str()
|
| 783 |
+
'(IND -> ANY)'
|
| 784 |
+
>>> Type.fromstring('<<e,t>,t>').str()
|
| 785 |
+
'((IND -> BOOL) -> BOOL)'
|
| 786 |
+
|
| 787 |
+
Type.__eq__()
|
| 788 |
+
|
| 789 |
+
>>> from nltk.sem.logic import *
|
| 790 |
+
|
| 791 |
+
>>> e = ENTITY_TYPE
|
| 792 |
+
>>> t = TRUTH_TYPE
|
| 793 |
+
>>> a = ANY_TYPE
|
| 794 |
+
>>> et = ComplexType(e,t)
|
| 795 |
+
>>> eet = ComplexType(e,ComplexType(e,t))
|
| 796 |
+
>>> at = ComplexType(a,t)
|
| 797 |
+
>>> ea = ComplexType(e,a)
|
| 798 |
+
>>> aa = ComplexType(a,a)
|
| 799 |
+
|
| 800 |
+
>>> e == e
|
| 801 |
+
True
|
| 802 |
+
>>> t == t
|
| 803 |
+
True
|
| 804 |
+
>>> e == t
|
| 805 |
+
False
|
| 806 |
+
>>> a == t
|
| 807 |
+
False
|
| 808 |
+
>>> t == a
|
| 809 |
+
False
|
| 810 |
+
>>> a == a
|
| 811 |
+
True
|
| 812 |
+
>>> et == et
|
| 813 |
+
True
|
| 814 |
+
>>> a == et
|
| 815 |
+
False
|
| 816 |
+
>>> et == a
|
| 817 |
+
False
|
| 818 |
+
>>> a == ComplexType(a,aa)
|
| 819 |
+
True
|
| 820 |
+
>>> ComplexType(a,aa) == a
|
| 821 |
+
True
|
| 822 |
+
|
| 823 |
+
matches()
|
| 824 |
+
|
| 825 |
+
>>> e.matches(t)
|
| 826 |
+
False
|
| 827 |
+
>>> a.matches(t)
|
| 828 |
+
True
|
| 829 |
+
>>> t.matches(a)
|
| 830 |
+
True
|
| 831 |
+
>>> a.matches(et)
|
| 832 |
+
True
|
| 833 |
+
>>> et.matches(a)
|
| 834 |
+
True
|
| 835 |
+
>>> ea.matches(eet)
|
| 836 |
+
True
|
| 837 |
+
>>> eet.matches(ea)
|
| 838 |
+
True
|
| 839 |
+
>>> aa.matches(et)
|
| 840 |
+
True
|
| 841 |
+
>>> aa.matches(t)
|
| 842 |
+
True
|
| 843 |
+
|
| 844 |
+
Type error during parsing
|
| 845 |
+
=========================
|
| 846 |
+
|
| 847 |
+
>>> try: print(tlp.parse(r'exists x y.(P(x) & P(x,y))'))
|
| 848 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
| 849 |
+
The variable 'P' was found in multiple places with different types.
|
| 850 |
+
>>> try: tlp.parse(r'\x y.see(x,y)(\x.man(x))')
|
| 851 |
+
... except TypeException as e: print(e)
|
| 852 |
+
The function '\x y.see(x,y)' is of type '<e,<e,?>>' and cannot be applied to '\x.man(x)' of type '<e,?>'. Its argument must match type 'e'.
|
| 853 |
+
>>> try: tlp.parse(r'\P x y.-P(x,y)(\x.-man(x))')
|
| 854 |
+
... except TypeException as e: print(e)
|
| 855 |
+
The function '\P x y.-P(x,y)' is of type '<<e,<e,t>>,<e,<e,t>>>' and cannot be applied to '\x.-man(x)' of type '<e,t>'. Its argument must match type '<e,<e,t>>'.
|
| 856 |
+
|
| 857 |
+
>>> a = tlp.parse(r'-talk(x)')
|
| 858 |
+
>>> signature = a.typecheck()
|
| 859 |
+
>>> try: print(tlp.parse(r'-talk(x,y)', signature))
|
| 860 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
| 861 |
+
The variable 'talk' was found in multiple places with different types.
|
| 862 |
+
|
| 863 |
+
>>> a = tlp.parse(r'-P(x)')
|
| 864 |
+
>>> b = tlp.parse(r'-P(x,y)')
|
| 865 |
+
>>> a.typecheck()
|
| 866 |
+
{...}
|
| 867 |
+
>>> b.typecheck()
|
| 868 |
+
{...}
|
| 869 |
+
>>> try: typecheck([a,b])
|
| 870 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
| 871 |
+
The variable 'P' was found in multiple places with different types.
|
| 872 |
+
|
| 873 |
+
>>> a = tlp.parse(r'P(x)')
|
| 874 |
+
>>> b = tlp.parse(r'P(x,y)')
|
| 875 |
+
>>> signature = {'P': '<e,t>'}
|
| 876 |
+
>>> a.typecheck(signature)
|
| 877 |
+
{...}
|
| 878 |
+
>>> try: typecheck([a,b], signature)
|
| 879 |
+
... except InconsistentTypeHierarchyException as e: print(e)
|
| 880 |
+
The variable 'P' was found in multiple places with different types.
|
| 881 |
+
|
| 882 |
+
Parse errors
|
| 883 |
+
============
|
| 884 |
+
|
| 885 |
+
>>> try: read_expr(r'')
|
| 886 |
+
... except LogicalExpressionException as e: print(e)
|
| 887 |
+
End of input found. Expression expected.
|
| 888 |
+
<BLANKLINE>
|
| 889 |
+
^
|
| 890 |
+
>>> try: read_expr(r'(')
|
| 891 |
+
... except LogicalExpressionException as e: print(e)
|
| 892 |
+
End of input found. Expression expected.
|
| 893 |
+
(
|
| 894 |
+
^
|
| 895 |
+
>>> try: read_expr(r')')
|
| 896 |
+
... except LogicalExpressionException as e: print(e)
|
| 897 |
+
Unexpected token: ')'. Expression expected.
|
| 898 |
+
)
|
| 899 |
+
^
|
| 900 |
+
>>> try: read_expr(r'()')
|
| 901 |
+
... except LogicalExpressionException as e: print(e)
|
| 902 |
+
Unexpected token: ')'. Expression expected.
|
| 903 |
+
()
|
| 904 |
+
^
|
| 905 |
+
>>> try: read_expr(r'(P(x) & Q(x)')
|
| 906 |
+
... except LogicalExpressionException as e: print(e)
|
| 907 |
+
End of input found. Expected token ')'.
|
| 908 |
+
(P(x) & Q(x)
|
| 909 |
+
^
|
| 910 |
+
>>> try: read_expr(r'(P(x) &')
|
| 911 |
+
... except LogicalExpressionException as e: print(e)
|
| 912 |
+
End of input found. Expression expected.
|
| 913 |
+
(P(x) &
|
| 914 |
+
^
|
| 915 |
+
>>> try: read_expr(r'(P(x) | )')
|
| 916 |
+
... except LogicalExpressionException as e: print(e)
|
| 917 |
+
Unexpected token: ')'. Expression expected.
|
| 918 |
+
(P(x) | )
|
| 919 |
+
^
|
| 920 |
+
>>> try: read_expr(r'P(x) ->')
|
| 921 |
+
... except LogicalExpressionException as e: print(e)
|
| 922 |
+
End of input found. Expression expected.
|
| 923 |
+
P(x) ->
|
| 924 |
+
^
|
| 925 |
+
>>> try: read_expr(r'P(x')
|
| 926 |
+
... except LogicalExpressionException as e: print(e)
|
| 927 |
+
End of input found. Expected token ')'.
|
| 928 |
+
P(x
|
| 929 |
+
^
|
| 930 |
+
>>> try: read_expr(r'P(x,')
|
| 931 |
+
... except LogicalExpressionException as e: print(e)
|
| 932 |
+
End of input found. Expression expected.
|
| 933 |
+
P(x,
|
| 934 |
+
^
|
| 935 |
+
>>> try: read_expr(r'P(x,)')
|
| 936 |
+
... except LogicalExpressionException as e: print(e)
|
| 937 |
+
Unexpected token: ')'. Expression expected.
|
| 938 |
+
P(x,)
|
| 939 |
+
^
|
| 940 |
+
>>> try: read_expr(r'exists')
|
| 941 |
+
... except LogicalExpressionException as e: print(e)
|
| 942 |
+
End of input found. Variable and Expression expected following quantifier 'exists'.
|
| 943 |
+
exists
|
| 944 |
+
^
|
| 945 |
+
>>> try: read_expr(r'exists x')
|
| 946 |
+
... except LogicalExpressionException as e: print(e)
|
| 947 |
+
End of input found. Expression expected.
|
| 948 |
+
exists x
|
| 949 |
+
^
|
| 950 |
+
>>> try: read_expr(r'exists x.')
|
| 951 |
+
... except LogicalExpressionException as e: print(e)
|
| 952 |
+
End of input found. Expression expected.
|
| 953 |
+
exists x.
|
| 954 |
+
^
|
| 955 |
+
>>> try: read_expr(r'\ ')
|
| 956 |
+
... except LogicalExpressionException as e: print(e)
|
| 957 |
+
End of input found. Variable and Expression expected following lambda operator.
|
| 958 |
+
\
|
| 959 |
+
^
|
| 960 |
+
>>> try: read_expr(r'\ x')
|
| 961 |
+
... except LogicalExpressionException as e: print(e)
|
| 962 |
+
End of input found. Expression expected.
|
| 963 |
+
\ x
|
| 964 |
+
^
|
| 965 |
+
>>> try: read_expr(r'\ x y')
|
| 966 |
+
... except LogicalExpressionException as e: print(e)
|
| 967 |
+
End of input found. Expression expected.
|
| 968 |
+
\ x y
|
| 969 |
+
^
|
| 970 |
+
>>> try: read_expr(r'\ x.')
|
| 971 |
+
... except LogicalExpressionException as e: print(e)
|
| 972 |
+
End of input found. Expression expected.
|
| 973 |
+
\ x.
|
| 974 |
+
^
|
| 975 |
+
>>> try: read_expr(r'P(x)Q(x)')
|
| 976 |
+
... except LogicalExpressionException as e: print(e)
|
| 977 |
+
Unexpected token: 'Q'.
|
| 978 |
+
P(x)Q(x)
|
| 979 |
+
^
|
| 980 |
+
>>> try: read_expr(r'(P(x)Q(x)')
|
| 981 |
+
... except LogicalExpressionException as e: print(e)
|
| 982 |
+
Unexpected token: 'Q'. Expected token ')'.
|
| 983 |
+
(P(x)Q(x)
|
| 984 |
+
^
|
| 985 |
+
>>> try: read_expr(r'exists x y')
|
| 986 |
+
... except LogicalExpressionException as e: print(e)
|
| 987 |
+
End of input found. Expression expected.
|
| 988 |
+
exists x y
|
| 989 |
+
^
|
| 990 |
+
>>> try: read_expr(r'exists x y.')
|
| 991 |
+
... except LogicalExpressionException as e: print(e)
|
| 992 |
+
End of input found. Expression expected.
|
| 993 |
+
exists x y.
|
| 994 |
+
^
|
| 995 |
+
>>> try: read_expr(r'exists x -> y')
|
| 996 |
+
... except LogicalExpressionException as e: print(e)
|
| 997 |
+
Unexpected token: '->'. Expression expected.
|
| 998 |
+
exists x -> y
|
| 999 |
+
^
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
>>> try: read_expr(r'A -> ((P(x) & Q(x)) -> Z')
|
| 1003 |
+
... except LogicalExpressionException as e: print(e)
|
| 1004 |
+
End of input found. Expected token ')'.
|
| 1005 |
+
A -> ((P(x) & Q(x)) -> Z
|
| 1006 |
+
^
|
| 1007 |
+
>>> try: read_expr(r'A -> ((P(x) &) -> Z')
|
| 1008 |
+
... except LogicalExpressionException as e: print(e)
|
| 1009 |
+
Unexpected token: ')'. Expression expected.
|
| 1010 |
+
A -> ((P(x) &) -> Z
|
| 1011 |
+
^
|
| 1012 |
+
>>> try: read_expr(r'A -> ((P(x) | )) -> Z')
|
| 1013 |
+
... except LogicalExpressionException as e: print(e)
|
| 1014 |
+
Unexpected token: ')'. Expression expected.
|
| 1015 |
+
A -> ((P(x) | )) -> Z
|
| 1016 |
+
^
|
| 1017 |
+
>>> try: read_expr(r'A -> (P(x) ->) -> Z')
|
| 1018 |
+
... except LogicalExpressionException as e: print(e)
|
| 1019 |
+
Unexpected token: ')'. Expression expected.
|
| 1020 |
+
A -> (P(x) ->) -> Z
|
| 1021 |
+
^
|
| 1022 |
+
>>> try: read_expr(r'A -> (P(x) -> Z')
|
| 1023 |
+
... except LogicalExpressionException as e: print(e)
|
| 1024 |
+
End of input found. Expected token ')'.
|
| 1025 |
+
A -> (P(x) -> Z
|
| 1026 |
+
^
|
| 1027 |
+
>>> try: read_expr(r'A -> (P(x,) -> Z')
|
| 1028 |
+
... except LogicalExpressionException as e: print(e)
|
| 1029 |
+
Unexpected token: ')'. Expression expected.
|
| 1030 |
+
A -> (P(x,) -> Z
|
| 1031 |
+
^
|
| 1032 |
+
>>> try: read_expr(r'A -> (P(x,)) -> Z')
|
| 1033 |
+
... except LogicalExpressionException as e: print(e)
|
| 1034 |
+
Unexpected token: ')'. Expression expected.
|
| 1035 |
+
A -> (P(x,)) -> Z
|
| 1036 |
+
^
|
| 1037 |
+
>>> try: read_expr(r'A -> (exists) -> Z')
|
| 1038 |
+
... except LogicalExpressionException as e: print(e)
|
| 1039 |
+
')' is an illegal variable name. Constants may not be quantified.
|
| 1040 |
+
A -> (exists) -> Z
|
| 1041 |
+
^
|
| 1042 |
+
>>> try: read_expr(r'A -> (exists x) -> Z')
|
| 1043 |
+
... except LogicalExpressionException as e: print(e)
|
| 1044 |
+
Unexpected token: ')'. Expression expected.
|
| 1045 |
+
A -> (exists x) -> Z
|
| 1046 |
+
^
|
| 1047 |
+
>>> try: read_expr(r'A -> (exists x.) -> Z')
|
| 1048 |
+
... except LogicalExpressionException as e: print(e)
|
| 1049 |
+
Unexpected token: ')'. Expression expected.
|
| 1050 |
+
A -> (exists x.) -> Z
|
| 1051 |
+
^
|
| 1052 |
+
>>> try: read_expr(r'A -> (\ ) -> Z')
|
| 1053 |
+
... except LogicalExpressionException as e: print(e)
|
| 1054 |
+
')' is an illegal variable name. Constants may not be abstracted.
|
| 1055 |
+
A -> (\ ) -> Z
|
| 1056 |
+
^
|
| 1057 |
+
>>> try: read_expr(r'A -> (\ x) -> Z')
|
| 1058 |
+
... except LogicalExpressionException as e: print(e)
|
| 1059 |
+
Unexpected token: ')'. Expression expected.
|
| 1060 |
+
A -> (\ x) -> Z
|
| 1061 |
+
^
|
| 1062 |
+
>>> try: read_expr(r'A -> (\ x y) -> Z')
|
| 1063 |
+
... except LogicalExpressionException as e: print(e)
|
| 1064 |
+
Unexpected token: ')'. Expression expected.
|
| 1065 |
+
A -> (\ x y) -> Z
|
| 1066 |
+
^
|
| 1067 |
+
>>> try: read_expr(r'A -> (\ x.) -> Z')
|
| 1068 |
+
... except LogicalExpressionException as e: print(e)
|
| 1069 |
+
Unexpected token: ')'. Expression expected.
|
| 1070 |
+
A -> (\ x.) -> Z
|
| 1071 |
+
^
|
| 1072 |
+
>>> try: read_expr(r'A -> (P(x)Q(x)) -> Z')
|
| 1073 |
+
... except LogicalExpressionException as e: print(e)
|
| 1074 |
+
Unexpected token: 'Q'. Expected token ')'.
|
| 1075 |
+
A -> (P(x)Q(x)) -> Z
|
| 1076 |
+
^
|
| 1077 |
+
>>> try: read_expr(r'A -> ((P(x)Q(x)) -> Z')
|
| 1078 |
+
... except LogicalExpressionException as e: print(e)
|
| 1079 |
+
Unexpected token: 'Q'. Expected token ')'.
|
| 1080 |
+
A -> ((P(x)Q(x)) -> Z
|
| 1081 |
+
^
|
| 1082 |
+
>>> try: read_expr(r'A -> (all x y) -> Z')
|
| 1083 |
+
... except LogicalExpressionException as e: print(e)
|
| 1084 |
+
Unexpected token: ')'. Expression expected.
|
| 1085 |
+
A -> (all x y) -> Z
|
| 1086 |
+
^
|
| 1087 |
+
>>> try: read_expr(r'A -> (exists x y.) -> Z')
|
| 1088 |
+
... except LogicalExpressionException as e: print(e)
|
| 1089 |
+
Unexpected token: ')'. Expression expected.
|
| 1090 |
+
A -> (exists x y.) -> Z
|
| 1091 |
+
^
|
| 1092 |
+
>>> try: read_expr(r'A -> (exists x -> y) -> Z')
|
| 1093 |
+
... except LogicalExpressionException as e: print(e)
|
| 1094 |
+
Unexpected token: '->'. Expression expected.
|
| 1095 |
+
A -> (exists x -> y) -> Z
|
| 1096 |
+
^
|
openflamingo/lib/python3.10/site-packages/nltk/test/meteor.doctest
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
.. -*- coding: utf-8 -*-
|
| 5 |
+
|
| 6 |
+
=============
|
| 7 |
+
METEOR tests
|
| 8 |
+
=============
|
| 9 |
+
|
| 10 |
+
No Alignment test
|
| 11 |
+
------------------
|
| 12 |
+
|
| 13 |
+
>>> from nltk.translate import meteor
|
| 14 |
+
>>> from nltk import word_tokenize
|
| 15 |
+
|
| 16 |
+
If the candidate has no alignment to any of the references, the METEOR score is 0.
|
| 17 |
+
|
| 18 |
+
>>> round(meteor(
|
| 19 |
+
... [word_tokenize('The candidate has no alignment to any of the references')],
|
| 20 |
+
... word_tokenize('John loves Mary')
|
| 21 |
+
... ), 4)
|
| 22 |
+
0.0
|
| 23 |
+
|
| 24 |
+
Tests based on wikipedia examples
|
| 25 |
+
---------------------------------
|
| 26 |
+
|
| 27 |
+
Testing on `wikipedia examples <https://en.wikipedia.org/wiki/METEOR#Examples>`_
|
| 28 |
+
|
| 29 |
+
>>> same_res = round(meteor(
|
| 30 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 31 |
+
... word_tokenize('The cat sat on the mat')
|
| 32 |
+
... ), 4)
|
| 33 |
+
>>> abs(same_res - 0.9977) < 1e-2
|
| 34 |
+
True
|
| 35 |
+
|
| 36 |
+
>>> meteor(
|
| 37 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 38 |
+
... word_tokenize('on the mat sat the cat')
|
| 39 |
+
... )
|
| 40 |
+
0.5
|
| 41 |
+
|
| 42 |
+
>>> round(meteor(
|
| 43 |
+
... [word_tokenize('The cat sat on the mat')],
|
| 44 |
+
... word_tokenize('The cat was sat on the mat')
|
| 45 |
+
... ), 4)
|
| 46 |
+
0.9654
|
| 47 |
+
|
| 48 |
+
Test corresponding to issue #2751, where METEOR score > 1
|
| 49 |
+
|
| 50 |
+
>>> round(meteor(
|
| 51 |
+
... [word_tokenize('create or update a vm set')],
|
| 52 |
+
... word_tokenize('creates or updates a virtual machine scale set')
|
| 53 |
+
... ), 4)
|
| 54 |
+
0.7806
|
openflamingo/lib/python3.10/site-packages/nltk/test/metrics.doctest
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. Copyright (C) 2001-2024 NLTK Project
|
| 2 |
+
.. For license information, see LICENSE.TXT
|
| 3 |
+
|
| 4 |
+
=======
|
| 5 |
+
Metrics
|
| 6 |
+
=======
|
| 7 |
+
|
| 8 |
+
-----
|
| 9 |
+
Setup
|
| 10 |
+
-----
|
| 11 |
+
|
| 12 |
+
>>> import pytest
|
| 13 |
+
>>> _ = pytest.importorskip("numpy")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
The `nltk.metrics` package provides a variety of *evaluation measures*
|
| 17 |
+
which can be used for a wide variety of NLP tasks.
|
| 18 |
+
|
| 19 |
+
>>> from nltk.metrics import *
|
| 20 |
+
|
| 21 |
+
------------------
|
| 22 |
+
Standard IR Scores
|
| 23 |
+
------------------
|
| 24 |
+
|
| 25 |
+
We can use standard scores from information retrieval to test the
|
| 26 |
+
performance of taggers, chunkers, etc.
|
| 27 |
+
|
| 28 |
+
>>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
|
| 29 |
+
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
|
| 30 |
+
>>> print(accuracy(reference, test))
|
| 31 |
+
0.8
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
The following measures apply to sets:
|
| 35 |
+
|
| 36 |
+
>>> reference_set = set(reference)
|
| 37 |
+
>>> test_set = set(test)
|
| 38 |
+
>>> precision(reference_set, test_set)
|
| 39 |
+
1.0
|
| 40 |
+
>>> print(recall(reference_set, test_set))
|
| 41 |
+
0.8
|
| 42 |
+
>>> print(f_measure(reference_set, test_set))
|
| 43 |
+
0.88888888888...
|
| 44 |
+
|
| 45 |
+
Measuring the likelihood of the data, given probability distributions:
|
| 46 |
+
|
| 47 |
+
>>> from nltk import FreqDist, MLEProbDist
|
| 48 |
+
>>> pdist1 = MLEProbDist(FreqDist("aldjfalskfjaldsf"))
|
| 49 |
+
>>> pdist2 = MLEProbDist(FreqDist("aldjfalssjjlldss"))
|
| 50 |
+
>>> print(log_likelihood(['a', 'd'], [pdist1, pdist2]))
|
| 51 |
+
-2.7075187496...
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
----------------
|
| 55 |
+
Distance Metrics
|
| 56 |
+
----------------
|
| 57 |
+
|
| 58 |
+
String edit distance (Levenshtein):
|
| 59 |
+
|
| 60 |
+
>>> edit_distance("rain", "shine")
|
| 61 |
+
3
|
| 62 |
+
>>> edit_distance_align("shine", "shine")
|
| 63 |
+
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]
|
| 64 |
+
>>> edit_distance_align("rain", "brainy")
|
| 65 |
+
[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6)]
|
| 66 |
+
>>> edit_distance_align("", "brainy")
|
| 67 |
+
[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)]
|
| 68 |
+
>>> edit_distance_align("", "")
|
| 69 |
+
[(0, 0)]
|
| 70 |
+
|
| 71 |
+
Other distance measures:
|
| 72 |
+
|
| 73 |
+
>>> s1 = set([1,2,3,4])
|
| 74 |
+
>>> s2 = set([3,4,5])
|
| 75 |
+
>>> binary_distance(s1, s2)
|
| 76 |
+
1.0
|
| 77 |
+
>>> print(jaccard_distance(s1, s2))
|
| 78 |
+
0.6
|
| 79 |
+
>>> print(masi_distance(s1, s2))
|
| 80 |
+
0.868
|
| 81 |
+
|
| 82 |
+
----------------------
|
| 83 |
+
Miscellaneous Measures
|
| 84 |
+
----------------------
|
| 85 |
+
|
| 86 |
+
Rank Correlation works with two dictionaries mapping keys to ranks.
|
| 87 |
+
The dictionaries should have the same set of keys.
|
| 88 |
+
|
| 89 |
+
>>> spearman_correlation({'e':1, 't':2, 'a':3}, {'e':1, 'a':2, 't':3})
|
| 90 |
+
0.5
|
| 91 |
+
|
| 92 |
+
Windowdiff uses a sliding window in comparing two segmentations of the same input (e.g. tokenizations, chunkings).
|
| 93 |
+
Segmentations are represented using strings of zeros and ones.
|
| 94 |
+
|
| 95 |
+
>>> s1 = "000100000010"
|
| 96 |
+
>>> s2 = "000010000100"
|
| 97 |
+
>>> s3 = "100000010000"
|
| 98 |
+
>>> s4 = "000000000000"
|
| 99 |
+
>>> s5 = "111111111111"
|
| 100 |
+
>>> windowdiff(s1, s1, 3)
|
| 101 |
+
0.0
|
| 102 |
+
>>> abs(windowdiff(s1, s2, 3) - 0.3) < 1e-6 # windowdiff(s1, s2, 3) == 0.3
|
| 103 |
+
True
|
| 104 |
+
>>> abs(windowdiff(s2, s3, 3) - 0.8) < 1e-6 # windowdiff(s2, s3, 3) == 0.8
|
| 105 |
+
True
|
| 106 |
+
>>> windowdiff(s1, s4, 3)
|
| 107 |
+
0.5
|
| 108 |
+
>>> windowdiff(s1, s5, 3)
|
| 109 |
+
1.0
|
| 110 |
+
|
| 111 |
+
----------------
|
| 112 |
+
Confusion Matrix
|
| 113 |
+
----------------
|
| 114 |
+
|
| 115 |
+
>>> reference = 'This is the reference data. Testing 123. aoaeoeoe'
|
| 116 |
+
>>> test = 'Thos iz_the rifirenci data. Testeng 123. aoaeoeoe'
|
| 117 |
+
>>> print(ConfusionMatrix(reference, test))
|
| 118 |
+
| . 1 2 3 T _ a c d e f g h i n o r s t z |
|
| 119 |
+
--+-------------------------------------------+
|
| 120 |
+
|<8>. . . . . 1 . . . . . . . . . . . . . . |
|
| 121 |
+
. | .<2>. . . . . . . . . . . . . . . . . . . |
|
| 122 |
+
1 | . .<1>. . . . . . . . . . . . . . . . . . |
|
| 123 |
+
2 | . . .<1>. . . . . . . . . . . . . . . . . |
|
| 124 |
+
3 | . . . .<1>. . . . . . . . . . . . . . . . |
|
| 125 |
+
T | . . . . .<2>. . . . . . . . . . . . . . . |
|
| 126 |
+
_ | . . . . . .<.>. . . . . . . . . . . . . . |
|
| 127 |
+
a | . . . . . . .<4>. . . . . . . . . . . . . |
|
| 128 |
+
c | . . . . . . . .<1>. . . . . . . . . . . . |
|
| 129 |
+
d | . . . . . . . . .<1>. . . . . . . . . . . |
|
| 130 |
+
e | . . . . . . . . . .<6>. . . 3 . . . . . . |
|
| 131 |
+
f | . . . . . . . . . . .<1>. . . . . . . . . |
|
| 132 |
+
g | . . . . . . . . . . . .<1>. . . . . . . . |
|
| 133 |
+
h | . . . . . . . . . . . . .<2>. . . . . . . |
|
| 134 |
+
i | . . . . . . . . . . 1 . . .<1>. 1 . . . . |
|
| 135 |
+
n | . . . . . . . . . . . . . . .<2>. . . . . |
|
| 136 |
+
o | . . . . . . . . . . . . . . . .<3>. . . . |
|
| 137 |
+
r | . . . . . . . . . . . . . . . . .<2>. . . |
|
| 138 |
+
s | . . . . . . . . . . . . . . . . . .<2>. 1 |
|
| 139 |
+
t | . . . . . . . . . . . . . . . . . . .<3>. |
|
| 140 |
+
z | . . . . . . . . . . . . . . . . . . . .<.>|
|
| 141 |
+
--+-------------------------------------------+
|
| 142 |
+
(row = reference; col = test)
|
| 143 |
+
<BLANKLINE>
|
| 144 |
+
|
| 145 |
+
>>> cm = ConfusionMatrix(reference, test)
|
| 146 |
+
>>> print(cm.pretty_format(sort_by_count=True))
|
| 147 |
+
| e a i o s t . T h n r 1 2 3 c d f g _ z |
|
| 148 |
+
--+-------------------------------------------+
|
| 149 |
+
|<8>. . . . . . . . . . . . . . . . . . 1 . |
|
| 150 |
+
e | .<6>. 3 . . . . . . . . . . . . . . . . . |
|
| 151 |
+
a | . .<4>. . . . . . . . . . . . . . . . . . |
|
| 152 |
+
i | . 1 .<1>1 . . . . . . . . . . . . . . . . |
|
| 153 |
+
o | . . . .<3>. . . . . . . . . . . . . . . . |
|
| 154 |
+
s | . . . . .<2>. . . . . . . . . . . . . . 1 |
|
| 155 |
+
t | . . . . . .<3>. . . . . . . . . . . . . . |
|
| 156 |
+
. | . . . . . . .<2>. . . . . . . . . . . . . |
|
| 157 |
+
T | . . . . . . . .<2>. . . . . . . . . . . . |
|
| 158 |
+
h | . . . . . . . . .<2>. . . . . . . . . . . |
|
| 159 |
+
n | . . . . . . . . . .<2>. . . . . . . . . . |
|
| 160 |
+
r | . . . . . . . . . . .<2>. . . . . . . . . |
|
| 161 |
+
1 | . . . . . . . . . . . .<1>. . . . . . . . |
|
| 162 |
+
2 | . . . . . . . . . . . . .<1>. . . . . . . |
|
| 163 |
+
3 | . . . . . . . . . . . . . .<1>. . . . . . |
|
| 164 |
+
c | . . . . . . . . . . . . . . .<1>. . . . . |
|
| 165 |
+
d | . . . . . . . . . . . . . . . .<1>. . . . |
|
| 166 |
+
f | . . . . . . . . . . . . . . . . .<1>. . . |
|
| 167 |
+
g | . . . . . . . . . . . . . . . . . .<1>. . |
|
| 168 |
+
_ | . . . . . . . . . . . . . . . . . . .<.>. |
|
| 169 |
+
z | . . . . . . . . . . . . . . . . . . . .<.>|
|
| 170 |
+
--+-------------------------------------------+
|
| 171 |
+
(row = reference; col = test)
|
| 172 |
+
<BLANKLINE>
|
| 173 |
+
|
| 174 |
+
>>> print(cm.pretty_format(sort_by_count=True, truncate=10))
|
| 175 |
+
| e a i o s t . T h |
|
| 176 |
+
--+---------------------+
|
| 177 |
+
|<8>. . . . . . . . . |
|
| 178 |
+
e | .<6>. 3 . . . . . . |
|
| 179 |
+
a | . .<4>. . . . . . . |
|
| 180 |
+
i | . 1 .<1>1 . . . . . |
|
| 181 |
+
o | . . . .<3>. . . . . |
|
| 182 |
+
s | . . . . .<2>. . . . |
|
| 183 |
+
t | . . . . . .<3>. . . |
|
| 184 |
+
. | . . . . . . .<2>. . |
|
| 185 |
+
T | . . . . . . . .<2>. |
|
| 186 |
+
h | . . . . . . . . .<2>|
|
| 187 |
+
--+---------------------+
|
| 188 |
+
(row = reference; col = test)
|
| 189 |
+
<BLANKLINE>
|
| 190 |
+
|
| 191 |
+
>>> print(cm.pretty_format(sort_by_count=True, truncate=10, values_in_chart=False))
|
| 192 |
+
| 1 |
|
| 193 |
+
| 1 2 3 4 5 6 7 8 9 0 |
|
| 194 |
+
---+---------------------+
|
| 195 |
+
1 |<8>. . . . . . . . . |
|
| 196 |
+
2 | .<6>. 3 . . . . . . |
|
| 197 |
+
3 | . .<4>. . . . . . . |
|
| 198 |
+
4 | . 1 .<1>1 . . . . . |
|
| 199 |
+
5 | . . . .<3>. . . . . |
|
| 200 |
+
6 | . . . . .<2>. . . . |
|
| 201 |
+
7 | . . . . . .<3>. . . |
|
| 202 |
+
8 | . . . . . . .<2>. . |
|
| 203 |
+
9 | . . . . . . . .<2>. |
|
| 204 |
+
10 | . . . . . . . . .<2>|
|
| 205 |
+
---+---------------------+
|
| 206 |
+
(row = reference; col = test)
|
| 207 |
+
Value key:
|
| 208 |
+
1:
|
| 209 |
+
2: e
|
| 210 |
+
3: a
|
| 211 |
+
4: i
|
| 212 |
+
5: o
|
| 213 |
+
6: s
|
| 214 |
+
7: t
|
| 215 |
+
8: .
|
| 216 |
+
9: T
|
| 217 |
+
10: h
|
| 218 |
+
<BLANKLINE>
|
| 219 |
+
|
| 220 |
+
For "e", the number of true positives should be 6, while the number of false negatives is 3.
|
| 221 |
+
So, the recall ought to be 6 / (6 + 3):
|
| 222 |
+
|
| 223 |
+
>>> cm.recall("e") # doctest: +ELLIPSIS
|
| 224 |
+
0.666666...
|
| 225 |
+
|
| 226 |
+
For "e", the false positive is just 1, so the precision should be 6 / (6 + 1):
|
| 227 |
+
|
| 228 |
+
>>> cm.precision("e") # doctest: +ELLIPSIS
|
| 229 |
+
0.857142...
|
| 230 |
+
|
| 231 |
+
The f-measure with default value of ``alpha = 0.5`` should then be:
|
| 232 |
+
|
| 233 |
+
* *1/(alpha/p + (1-alpha)/r) =*
|
| 234 |
+
* *1/(0.5/p + 0.5/r) =*
|
| 235 |
+
* *2pr / (p + r) =*
|
| 236 |
+
* *2 * 0.857142... * 0.666666... / (0.857142... + 0.666666...) =*
|
| 237 |
+
* *0.749999...*
|
| 238 |
+
|
| 239 |
+
>>> cm.f_measure("e") # doctest: +ELLIPSIS
|
| 240 |
+
0.749999...
|
| 241 |
+
|
| 242 |
+
--------------------
|
| 243 |
+
Association measures
|
| 244 |
+
--------------------
|
| 245 |
+
|
| 246 |
+
These measures are useful to determine whether the coocurrence of two random
|
| 247 |
+
events is meaningful. They are used, for instance, to distinguish collocations
|
| 248 |
+
from other pairs of adjacent words.
|
| 249 |
+
|
| 250 |
+
We bring some examples of bigram association calculations from Manning and
|
| 251 |
+
Schutze's SNLP, 2nd Ed. chapter 5.
|
| 252 |
+
|
| 253 |
+
>>> n_new_companies, n_new, n_companies, N = 8, 15828, 4675, 14307668
|
| 254 |
+
>>> bam = BigramAssocMeasures
|
| 255 |
+
>>> bam.raw_freq(20, (42, 20), N) == 20. / N
|
| 256 |
+
True
|
| 257 |
+
>>> bam.student_t(n_new_companies, (n_new, n_companies), N)
|
| 258 |
+
0.999...
|
| 259 |
+
>>> bam.chi_sq(n_new_companies, (n_new, n_companies), N)
|
| 260 |
+
1.54...
|
| 261 |
+
>>> bam.likelihood_ratio(150, (12593, 932), N)
|
| 262 |
+
1291...
|
| 263 |
+
|
| 264 |
+
For other associations, we ensure the ordering of the measures:
|
| 265 |
+
|
| 266 |
+
>>> bam.mi_like(20, (42, 20), N) > bam.mi_like(20, (41, 27), N)
|
| 267 |
+
True
|
| 268 |
+
>>> bam.pmi(20, (42, 20), N) > bam.pmi(20, (41, 27), N)
|
| 269 |
+
True
|
| 270 |
+
>>> bam.phi_sq(20, (42, 20), N) > bam.phi_sq(20, (41, 27), N)
|
| 271 |
+
True
|
| 272 |
+
>>> bam.poisson_stirling(20, (42, 20), N) > bam.poisson_stirling(20, (41, 27), N)
|
| 273 |
+
True
|
| 274 |
+
>>> bam.jaccard(20, (42, 20), N) > bam.jaccard(20, (41, 27), N)
|
| 275 |
+
True
|
| 276 |
+
>>> bam.dice(20, (42, 20), N) > bam.dice(20, (41, 27), N)
|
| 277 |
+
True
|
| 278 |
+
>>> bam.fisher(20, (42, 20), N) > bam.fisher(20, (41, 27), N) # doctest: +SKIP
|
| 279 |
+
False
|
| 280 |
+
|
| 281 |
+
For trigrams, we have to provide more count information:
|
| 282 |
+
|
| 283 |
+
>>> n_w1_w2_w3 = 20
|
| 284 |
+
>>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
|
| 285 |
+
>>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
|
| 286 |
+
>>> n_w1, n_w2, n_w3 = 100, 200, 300
|
| 287 |
+
>>> uni_counts = (n_w1, n_w2, n_w3)
|
| 288 |
+
>>> N = 14307668
|
| 289 |
+
>>> tam = TrigramAssocMeasures
|
| 290 |
+
>>> tam.raw_freq(n_w1_w2_w3, pair_counts, uni_counts, N) == 1. * n_w1_w2_w3 / N
|
| 291 |
+
True
|
| 292 |
+
>>> uni_counts2 = (n_w1, n_w2, 100)
|
| 293 |
+
>>> tam.student_t(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.student_t(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 294 |
+
True
|
| 295 |
+
>>> tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 296 |
+
True
|
| 297 |
+
>>> tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 298 |
+
True
|
| 299 |
+
>>> tam.pmi(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 300 |
+
True
|
| 301 |
+
>>> tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 302 |
+
True
|
| 303 |
+
>>> tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 304 |
+
True
|
| 305 |
+
>>> tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts, N)
|
| 306 |
+
True
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
For fourgrams, we have to provide more count information:
|
| 310 |
+
|
| 311 |
+
>>> n_w1_w2_w3_w4 = 5
|
| 312 |
+
>>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40
|
| 313 |
+
>>> n_w1_w2_w3, n_w2_w3_w4 = 20, 10
|
| 314 |
+
>>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3)
|
| 315 |
+
>>> triplet_counts = (n_w1_w2_w3, n_w2_w3_w4)
|
| 316 |
+
>>> n_w1, n_w2, n_w3, n_w4 = 100, 200, 300, 400
|
| 317 |
+
>>> uni_counts = (n_w1, n_w2, n_w3, n_w4)
|
| 318 |
+
>>> N = 14307668
|
| 319 |
+
>>> qam = QuadgramAssocMeasures
|
| 320 |
+
>>> qam.raw_freq(n_w1_w2_w3_w4, pair_counts, triplet_counts, uni_counts, N) == 1. * n_w1_w2_w3_w4 / N
|
| 321 |
+
True
|