diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp new file mode 100644 index 0000000000000000000000000000000000000000..de6da20ed825f24c32c205912b5897e277c2d302 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10d { + +class TORCH_API Backoff { + public: + virtual ~Backoff() = default; + + virtual std::chrono::milliseconds nextBackoff() = 0; + virtual void reset() = 0; + + void sleepBackoff() { + std::this_thread::sleep_for(nextBackoff()); + } +}; + +class TORCH_API ExponentialBackoffWithJitter : public Backoff { + public: + ExponentialBackoffWithJitter(); + + std::chrono::milliseconds nextBackoff() override; + void reset() override; + + public: + std::chrono::milliseconds initialInterval{500}; + double randomizationFactor{0.5}; + double multiplier{1.5}; + std::chrono::milliseconds maxInterval{60000}; + + private: + std::mt19937 gen_; + std::chrono::milliseconds currentInterval_{0}; +}; + +class TORCH_API FixedBackoff : public Backoff { + public: + FixedBackoff(std::chrono::milliseconds interval); + + std::chrono::milliseconds nextBackoff() override; + void reset() override; + + private: + std::chrono::milliseconds interval_; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/DMAConnectivity.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/DMAConnectivity.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cede6aa265c7733a5f65f5c1840d9fa093290bf3 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/DMAConnectivity.hpp @@ -0,0 +1,40 @@ +#pragma once + +#include + +#include + +namespace 
c10d { + +struct TORCH_API DMAConnectivity : c10::intrusive_ptr_target { + c10::DeviceType device_type; + std::string connection_type; + + // This is an NxN matrix representing the connectivity between N devices, + // where each element matrix[i][j] indicates the connectivity between device + // i and device j. A value of 0 denotes that there is no connection between + // device i and j. The meaning of non-zero values are specific to the + // connection type (e.g., for NVLink it represents the number of NVLinks). + std::vector> matrix; + + explicit DMAConnectivity( + c10::DeviceType device_type, + std::string connection_type, + std::vector> matrix); +}; + +struct DMAConnectivityDetector : c10::intrusive_ptr_target { + virtual c10::intrusive_ptr detect() = 0; + virtual ~DMAConnectivityDetector() {} +}; + +C10_EXPORT void register_dma_connectivity_detector( + c10::DeviceType device_type, + const std::string& connection_type, + c10::intrusive_ptr detector); + +TORCH_API c10::intrusive_ptr detect_dma_connectivity( + c10::DeviceType device_type, + const std::string& connection_type); + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..0bb0756e061678945521349daba47da9d0a6b696 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include + +#include +#include + +#include + +namespace c10d { + +class TORCH_API FileStore : public Store { + public: + explicit FileStore(std::string path, int numWorkers); + + ~FileStore() override; + + void set(const std::string& key, const std::vector& value) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + 
std::vector get(const std::string& key) override; + + int64_t add(const std::string& key, int64_t value) override; + + int64_t getNumKeys() override; + + bool deleteKey(const std::string& key) override; + + bool check(const std::vector& keys) override; + + void wait(const std::vector& keys) override; + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + // Returns the path used by the FileStore. + const std::string& getPath() const noexcept { + return path_; + } + + protected: + int64_t addHelper(const std::string& key, int64_t i); + + std::string path_; + off_t pos_{0}; + + int numWorkers_; + const std::string cleanupKey_; + const std::string refCountKey_; + const std::string regularPrefix_; + const std::string deletePrefix_; + + std::unordered_map> cache_; + + std::mutex activeFileOpLock_; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp new file mode 100644 index 0000000000000000000000000000000000000000..cbb19e686095a75e747795d2216c2ff3eece380c --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace c10d { + +C10_EXPORT void register_work( + const at::Tensor& tensor, + const c10::intrusive_ptr& work); + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp new file mode 100644 index 0000000000000000000000000000000000000000..1221e9d033f2ffde494883fc08122e8fb6a55902 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/GlooDeviceFactory.hpp @@ -0,0 +1,32 @@ +#pragma once + +#ifdef 
USE_C10D_GLOO + +#include + +#include +#include +#include + +namespace c10d { + +class TORCH_API GlooDeviceFactory { + public: + // Create new device instance for specific interface. + static std::shared_ptr<::gloo::transport::Device> makeDeviceForInterface( + const std::string& interface); + + // Create new device instance for specific hostname or address. + static std::shared_ptr<::gloo::transport::Device> makeDeviceForHostname( + const std::string& hostname); +}; + +TORCH_DECLARE_SHARED_REGISTRY( + GlooDeviceRegistry, + ::gloo::transport::Device, + const std::string&, /* interface */ + const std::string& /* hostname */); + +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp new file mode 100644 index 0000000000000000000000000000000000000000..3697d62301ba36655fff862fe462b6814208526a --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10d { + +class TORCH_API HashStore : public Store { + public: + ~HashStore() override = default; + + void set(const std::string& key, const std::vector& data) override; + + std::vector compareSet( + const std::string& key, + const std::vector& expectedValue, + const std::vector& desiredValue) override; + + std::vector get(const std::string& key) override; + + void wait(const std::vector& keys) override { + wait(keys, timeout_); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override; + + int64_t add(const std::string& key, int64_t value) override; + + int64_t getNumKeys() override; + + bool check(const std::vector& keys) override; + + bool deleteKey(const std::string& key) override; + + void append(const std::string& key, const std::vector& 
value) + override; + + std::vector> multiGet( + const std::vector& keys) override; + + void multiSet( + const std::vector& keys, + const std::vector>& values) override; + + // Returns true if this store support append, multiGet and multiSet + bool hasExtendedApi() const override; + + protected: + std::unordered_map> map_; + std::mutex m_; + std::condition_variable cv_; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..070cbd34b3797016fa7f45976fc2a51c739736cc --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/NCCLUtils.hpp @@ -0,0 +1,718 @@ +#pragma once + +#ifdef USE_C10D_NCCL + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 14) +#define NCCL_HAS_COMM_NONBLOCKING +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 18) +#define NCCL_HAS_COMM_SPLIT +#endif + +// ncclGetLastError() is enabled only for NCCL versions 2.13+ +// ncclRemoteError only exists in NCCL versions 2.13+ +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 13) +#define ENABLE_NCCL_GET_LAST_ERROR +#define NCCL_REMOTE_ERROR +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_GET_LAST_ERROR +#define NCCL_REMOTE_ERROR +#endif + +// Error checking is enabled only for NCCL versions 2.4+ since ncclCommAbort() +// and ncclCommGetAsyncError() are not supported in earlier versions. 
+#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 4) +#define ENABLE_NCCL_ERROR_CHECKING +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_ERROR_CHECKING +#endif + +// P2P is enabled only for NCCL versions 2.7+ since ncclSend() +// and ncclRecv() are not supported in earlier versions. +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 7) +#define ENABLE_NCCL_P2P_SUPPORT +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_P2P_SUPPORT +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 11) +#define ENABLE_NCCL_PREMUL_SUM_SUPPORT +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define ENABLE_NCCL_PREMUL_SUM_SUPPORT +#endif + +#if defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 17) +#define NCCL_HAS_COMM_CTA_CGA +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define NCCL_HAS_COMM_CTA_CGA +#endif + +#if defined(NCCL_REGISTRATION_SUPPORTED) || \ + ((defined(NCCL_MAJOR) && (NCCL_MAJOR == 2) && defined(NCCL_MINOR) && \ + (NCCL_MINOR >= 19))) +#define NCCL_HAS_COMM_REGISTER +#elif defined(NCCL_MAJOR) && (NCCL_MAJOR >= 3) +#define NCCL_HAS_COMM_REGISTER +#endif + +// Macro to throw on a non-successful NCCL return value. +#define C10D_NCCL_CHECK(cmd, failureReason) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } while (0) + +// Macro to throw on a non-successful NCCL return value for NONBLOCKING calls. 
+#define C10D_NCCL_CHECK_NONBLOCKING(cmd, failureReason) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess && result != ncclInProgress) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } while (0) + +// Macro to throw on a non-successful NCCL return value, non-blocking. +#define C10D_NCCL_CHECK_TIMEOUT(cmd, comm, failureReason) \ + ncclResult_t result = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + while (result == ncclInProgress) { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed > nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(result) + "\n" + \ + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comm, &result); \ + } \ + if (result != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(result) + \ + "\n" + getNcclErrorDetailStr(result, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +#define C10D_NCCL_CHECK_TIMEOUT_GROUPEND(cmd, comm, failureReason) \ + ncclResult_t state = cmd; \ + auto startTimepoint = std::chrono::steady_clock::now(); \ + if (state == ncclInProgress) { \ + do { \ + if (nccl_nonblocking_timeout() > 0) { \ + auto currentTimepoint = std::chrono::steady_clock::now(); \ + auto timeElapsed = std::chrono::duration_cast( \ + currentTimepoint - startTimepoint) \ + .count(); \ + if (timeElapsed 
> nccl_nonblocking_timeout()) { \ + std::string err = "NCCL timeout in: " + std::string(__FILE__) + \ + ":" + std::to_string(__LINE__) + ", " + \ + ncclGetErrorWithVersion(state) + "\n" + \ + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } \ + } \ + ncclCommGetAsyncError(comm->getNcclComm(), &state); \ + } while (state == ncclInProgress); \ + } \ + if (state != ncclSuccess) { \ + std::string err = "NCCL error in: " + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + ", " + ncclGetErrorWithVersion(state) + \ + "\n" + getNcclErrorDetailStr(state, failureReason); \ + TORCH_CHECK_WITH(DistBackendError, false, err); \ + } + +// Macro to print and abort on a non-successful NCCL return value. +#define C10D_NCCL_ASSERT(cmd) \ + do { \ + ncclResult_t result = cmd; \ + if (result != ncclSuccess) { \ + std::string err = ncclGetErrorWithVersion(result); \ + fprintf( \ + stderr, \ + "NCCL error in: %s:%d, %s\n", \ + __FILE__, \ + __LINE__, \ + err.c_str()); \ + abort(); \ + } \ + } while (0) + +namespace c10d { +#define DEFINE_CONSTANT(name, value) \ + static c10::IValue name = value; \ + static std::string name##_str = value; +// Update whenever changing contents or formatting of the dump +// (minor when adding fields, major when changing existing fields) +// Also update both JSON and Pickle dumps to make use of the newly defined +// field(s). 
+DEFINE_CONSTANT(version_val, "2.4"); +DEFINE_CONSTANT(entries_key, "entries"); +DEFINE_CONSTANT(nccl_comm_key, "nccl_comm_state"); +DEFINE_CONSTANT(version_key, "version"); +DEFINE_CONSTANT(pg_config_key, "pg_config"); +DEFINE_CONSTANT(pg_status_key, "pg_status"); +DEFINE_CONSTANT(record_id_key, "record_id"); +DEFINE_CONSTANT(pg_id_key, "pg_id"); +DEFINE_CONSTANT(pg_name_key, "process_group"); +DEFINE_CONSTANT(collective_seq_id_key, "collective_seq_id"); +DEFINE_CONSTANT(p2p_seq_id_key, "p2p_seq_id"); +DEFINE_CONSTANT(is_p2p_key, "is_p2p"); +DEFINE_CONSTANT(op_id_key, "op_id"); +DEFINE_CONSTANT(profiling_name_key, "profiling_name"); +DEFINE_CONSTANT(input_sizes_key, "input_sizes"); +DEFINE_CONSTANT(input_dtypes_key, "input_dtypes"); +DEFINE_CONSTANT(output_sizes_key, "output_sizes"); +DEFINE_CONSTANT(output_dtypes_key, "output_dtypes"); +DEFINE_CONSTANT(time_created_key, "time_created_ns"); +DEFINE_CONSTANT(duration_key, "duration_ms"); +DEFINE_CONSTANT(timeout_key, "timeout_ms"); +DEFINE_CONSTANT(frames_key, "frames"); +DEFINE_CONSTANT(state_key, "state"); +DEFINE_CONSTANT(line_key, "line"); +DEFINE_CONSTANT(name_key, "name"); +DEFINE_CONSTANT(filename_key, "filename"); +DEFINE_CONSTANT(retired_key, "retired"); +DEFINE_CONSTANT(time_discovered_started_key, "time_discovered_started_ns"); +DEFINE_CONSTANT(time_discovered_completed_key, "time_discovered_completed_ns"); +DEFINE_CONSTANT(completed_state, "completed"); +DEFINE_CONSTANT(scheduled_state, "scheduled"); +DEFINE_CONSTANT(started_state, "started"); +#undef DEFINE_CONSTANT + +TORCH_API size_t hashTensors(const std::vector& tensors); +TORCH_API std::string getNcclVersion(); +TORCH_API std::string ncclGetErrorWithVersion(ncclResult_t error); +bool nccl_use_nonblocking(); +int nccl_nonblocking_timeout(); + +// Provides additional detail into NCCL error codes based on when these are +// thrown in the NCCL codebase. 
+TORCH_API std::string getNcclErrorDetailStr( + ncclResult_t error, + std::optional processGroupFailureReason = std::nullopt); + +// Write NCCL debug info to local disk or any storage users define. +// There are some constrains we set for the debug info writer: +// 1. The writer should only be registered once. +// 2. Once registered, users cannot change it including un-register. +// 3. It is recommended to register the customized writer in the trainer setup, +// If users don't register before calling launchAsyncDebugDump, then users +// lose the chance to register (and the default writer will be +// auto-registered). +class TORCH_API DebugInfoWriter { + public: + virtual ~DebugInfoWriter() = default; + virtual void write(const std::string& ncclTrace); + static DebugInfoWriter& getWriter(int rank); + static void registerWriter(std::unique_ptr writer); + virtual std::string getWriterTarget() { + return filename_; + } + + protected: + DebugInfoWriter(std::string namePrefix, int rank) { + filename_ = c10::str(namePrefix, rank); + } + std::string filename_; + + private: + static std::unique_ptr writer_; + static std::atomic hasWriterRegistered_; +}; + +// RAII wrapper for NCCL communicator +class NCCLComm { + public: + explicit NCCLComm(ncclComm_t ncclComm) + : ncclComm_(ncclComm), + aborted_(false), + ncclAsyncErr_(ncclSuccess), + commFailureReason_(std::nullopt), + initialized_(false) {} + + NCCLComm() : NCCLComm(nullptr) {} + + ~NCCLComm() noexcept { + // Add lock in this destructor, as aborted_ needs to be read after memory + // barrier here. + std::unique_lock lock(mutex_); + if (ncclComm_ && initialized_ && !aborted_) { +#ifdef ENABLE_NCCL_ERROR_CHECKING + // Use ncclCommAbort instead of ncclCommDestroy here since + // ncclCommDestroy could block forever waiting for work to complete on + // the communicator. 
+ C10D_NCCL_ASSERT(::ncclCommAbort(ncclComm_)); +#else + C10D_NCCL_ASSERT(::ncclCommDestroy(ncclComm_)); +#endif + } + } + + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId) { + auto comm = std::make_shared(); + C10D_NCCL_CHECK( + ncclCommInitRank(&(comm->ncclComm_), numRanks, commId, rank), + std::nullopt); + comm->ncclId_ = commId; + comm->rank_ = rank; + comm->initialized_ = true; + return comm; + } + +#ifdef NCCL_HAS_COMM_NONBLOCKING + static std::shared_ptr create( + int numRanks, + int rank, + ncclUniqueId commId, + ncclConfig_t& config) { + auto comm = std::make_shared(); + bool isInitialized = false; + if (nccl_use_nonblocking()) { + config.blocking = 0; + LOG(INFO) << "Rank " << rank + << ": creating NCCL communicator in nonblocking mode"; + C10D_NCCL_CHECK_NONBLOCKING( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + std::nullopt); + } else { + C10D_NCCL_CHECK( + ncclCommInitRankConfig( + &(comm->ncclComm_), numRanks, commId, rank, &config), + std::nullopt); + // under blocking mode, comm is initialized after NCCL CHECK + isInitialized = true; + } + comm->ncclId_ = commId; + comm->rank_ = rank; + comm->initialized_ = isInitialized; + return comm; + } + + static std::shared_ptr split( + NCCLComm* source, + int color_id, + int rank, + ncclConfig_t& config, + std::vector& ranks_ull); +#endif + +#if defined(IS_NCCLX) && defined(NCCL_COMM_DUMP) + std::unordered_map ncclCommDump() { + std::unordered_map dump; + if (isAborted()) { + LOG(INFO) << "Communicator was aborted before trying to dump its state."; + return dump; + } + C10D_NCCL_CHECK(::ncclCommDump(ncclComm_, dump), std::nullopt); + return dump; + } +#endif + + ncclUniqueId getNcclId() { + return ncclId_; + } + + // Must not be copyable + NCCLComm(const NCCLComm&) = delete; + NCCLComm& operator=(const NCCLComm&) = delete; + + // Do not support move assignment as there is no valid use case + NCCLComm& operator=(NCCLComm&& other) = delete; 
+ + // Move constructable + NCCLComm(NCCLComm&& other) { + // Using other's lock, as it reads other's states + // Can not use this.mutex_, as this object is being constructed. + std::unique_lock lock(other.mutex_); + std::swap(ncclComm_, other.ncclComm_); + std::swap(aborted_, other.aborted_); + std::swap(ncclAsyncErr_, other.ncclAsyncErr_); + std::swap(initialized_, other.initialized_); + } + + ncclComm_t getNcclComm(); + + std::optional getNcclCommFailureReason() const { + std::unique_lock lock(mutex_); + return commFailureReason_; + } + + void ncclCommAbort( + std::optional commFailureReason = std::nullopt) { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (aborted_ && !initialized_) { + // Should not abort twice. + return; + } + +#ifdef NCCL_HAS_COMM_REGISTER + // Deregister all registered segments before aborting. + for (auto& it : registeredSegmentHandles_) { + void* handle = it.second; + C10D_NCCL_CHECK( + ::ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + " on ncclComm_ ", + ncclComm_)); + } + registeredSegmentHandles_.clear(); +#endif + + // Set true failure reason if provided by ProcessGroupNCCL (e.g. work + // timeout) + commFailureReason_ = commFailureReason; + LOG(INFO) << "Aborting ncclComm_ " << ncclComm_ << " with reason: " + << (commFailureReason ? *commFailureReason + : "No abort reason provided."); +#ifndef NCCL_HAS_COMM_NONBLOCKING + C10D_NCCL_CHECK(::ncclCommAbort(ncclComm_), commFailureReason_); +#else + C10D_NCCL_CHECK_TIMEOUT( + ::ncclCommAbort(ncclComm_), ncclComm_, commFailureReason_); +#endif + aborted_ = true; + ncclComm_ = nullptr; + + // Set an appropriate error so that we avoid using the communicator. + if (ncclAsyncErr_ == ncclSuccess) { + ncclAsyncErr_ = ncclSystemError; + } +#else + // This is a NOOP, if error checks are disabled. 
+ return; +#endif + } + + bool isAborted() const { + std::unique_lock lock(mutex_); + return aborted_; + } + + uint64_t getCommSplitCounter() const { + return ncclCommSplitCounter_; + } + + ncclResult_t checkForNcclError() { + std::unique_lock lock(mutex_); +#ifdef ENABLE_NCCL_ERROR_CHECKING + if (ncclAsyncErr_ != ncclSuccess) { + return ncclAsyncErr_; + } + C10D_NCCL_CHECK( + ncclCommGetAsyncError(ncclComm_, &ncclAsyncErr_), commFailureReason_); + return ncclAsyncErr_; +#else + // Always return success, if error checks are disabled. + return ncclSuccess; +#endif + } + + ncclResult_t registerSegment(void* ptr, size_t size) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + // We register only segments from cache allocator + // which are guaranteed to be with disjoint addr ranges. Thus, a ptr always + // maps to a unique handle and should not be registered before the current + // ptr is deregistered and freed. + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 0, + "Segment with ptr ", + ptr, + " has already been registered on ncclComm_ ", + ncclComm_); + + void* handle; + C10D_NCCL_CHECK( + ncclCommRegister(ncclComm_, ptr, size, &handle), + c10::str( + "Failed to register segment with ptr ", + ptr, + ", size ", + size, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_[ptr] = handle; + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif + } + + ncclResult_t deregisterSegment(void* ptr) { + std::unique_lock lock(mutex_); +#ifdef NCCL_HAS_COMM_REGISTER + TORCH_CHECK( + registeredSegmentHandles_.count(ptr) == 1, + "Segment with ptr ", + ptr, + " is not registered on ncclComm_ ", + ncclComm_); + + void* handle = registeredSegmentHandles_[ptr]; + C10D_NCCL_CHECK( + ncclCommDeregister(ncclComm_, handle), + c10::str( + "Failed to deregister segment handle ", + handle, + ", with ptr ", + ptr, + " on ncclComm_ ", + ncclComm_)); + registeredSegmentHandles_.erase(ptr); + return ncclSuccess; +#else + return ncclInvalidUsage; +#endif 
+ } + + friend class ProcessGroupNCCL; + + protected: + // a helper function to wait until the communicator is initialized; + void waitUntilInitialized(int timeoutSecs); + ncclComm_t ncclComm_; + // Unique nccl_id for this communicator. + ncclUniqueId ncclId_; + bool aborted_; + uint64_t ncclCommSplitCounter_{0}; + ncclResult_t ncclAsyncErr_; + mutable std::mutex mutex_; + // Rank that this communicator corresponds to. + int rank_; + // Optional reason for communicator failure, provided by ProcessGroupNCCL for + // better error messaging. + std::optional commFailureReason_; + bool initialized_{false}; +#ifdef NCCL_HAS_COMM_REGISTER + // Stores handlers for tensors registered by NCCL + std::unordered_map registeredSegmentHandles_; +#endif +}; + +// Helper that automatically cleans up premul sums. +struct ncclRedOpRAII { + ncclRedOpRAII() = default; + ncclRedOpRAII(ncclRedOp_t op) : op_(op) {} + ncclRedOpRAII(ncclRedOp_t op, ncclComm_t comm) + : op_(op), comm_(comm), premul_sum_(true) {} + ncclRedOpRAII(const ncclRedOpRAII&) = delete; + ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete; + ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() { + std::swap(tmp.op_, this->op_); + std::swap(tmp.comm_, this->comm_); + std::swap(tmp.premul_sum_, this->premul_sum_); + } +#if defined(ENABLE_NCCL_PREMUL_SUM_SUPPORT) + ~ncclRedOpRAII() { + if (premul_sum_) { + ncclRedOpDestroy(op_, comm_); + } + } +#endif + operator ncclRedOp_t() const { + return op_; + } + ncclRedOp_t op_; + ncclComm_t comm_; + bool premul_sum_ = false; +}; + +/* Helper used by work::getDuration() and nccl flight recorder */ +float getDurationFromEvent( + at::cuda::CUDAEvent& ncclStartEvent, + at::cuda::CUDAEvent& ncclEndEvent); + +struct NCCLTraceBuffer { + static NCCLTraceBuffer* get() { + // intentionally leak on exit + // because this will hold python state that may get destructed + static NCCLTraceBuffer* instance = new NCCLTraceBuffer(); + return instance; + } + NCCLTraceBuffer() { + max_entries_ 
= getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0); + capture_cpp_stack_ = getCvarBool({"TORCH_NCCL_TRACE_CPP_STACK"}, false); + enabled_ = max_entries_ > 0; + } + using Event = at::cuda::CUDAEvent; + struct Entry { + size_t id_; // incremented id in the trace buffer + // used to figure out where in the circular entries + // buffer this entry will be located to + // update state information + size_t pg_id_; + std::tuple pg_name_; // + + // collective_seq_id and p2p_seq_id refer to actual kernel launches (e.g. 1 + // per coalesced group). + // collective_seq_id only increments for true collective operations (over + // all ranks in the group). p2p_seq_id only increments over non-collective + // operations in the group. op_id refers to logical operations (e.g. one per + // op inside coalesced group) + size_t collective_seq_id_; + size_t p2p_seq_id_; + size_t op_id_; + std::string profiling_name_; + + std::shared_ptr traceback_; + // we borrow pointers to start_ and end_ so we can query the state + // on reporting. However, once the event is completed, the call + // to `complete` will clear these. + Event *start_, *end_; + + // timestamp when the entry was created, likely close to the time the work + // was 'enqueued'- not necessarily started + c10::time_t time_created_; + + // configured timeout for this entry + c10::time_t timeout_ms_; + + // Is this a P2P event? + bool isP2P_; + + std::optional duration_; + + // timestamp when our CPU threads discovered that the kernel started. + // will always be _after_ it actually started, and can be very late + // if the watchdog thread got stuck on CUDA APIs. + std::optional time_discovered_started_; + + // timestamp when our CPU threads discovered that the kernel completed. 
+ // will always be _after_ it actually complated, and can be the same time + // as the discovery of the start if the watchdog thread is stuck on CUDA + // APIs + std::optional time_discovered_completed_; + + // size information for input/output tensors + c10::SmallVector input_dims_; + std::vector input_dtypes_; + c10::SmallVector output_dims_; + std::vector output_dtypes_; + c10::SmallVector sizes_; // flattened from inputs, outputs + bool retired_ = false; // is this work entry no longer in the workMetaList_? + // a retired but not completed event has timed out + }; + + bool enabled_ = false; + bool capture_cpp_stack_ = false; + std::mutex mutex_; + std::vector entries_; + size_t max_entries_ = 0; + size_t next_ = 0; + size_t id_ = 0; + std::map> all_pg_status_ = {}; + std::map, std::vector> + pg_name_to_ranks_ = {}; + + std::optional record( + size_t pg_id, + const std::tuple& pg_name, + size_t collective_seq_id, + size_t p2p_seq_id, + size_t op_id, + std::string profiling_name, + const std::vector& inputs, + const std::vector& outputs, + Event* start, + Event* end, + std::chrono::milliseconds timeout_ms, + std::shared_ptr pg_status, + bool isP2P); + + void record_pg_ranks( + const std::tuple& pg_name, + std::vector ranks); + + void update_state(Entry& r); + + std::vector dump_entries(); + + /* + Mark an Event as completed and free its events. + This is called by the watchdog thread, and is asynchronous from the + perspective of the main thread. + compute_duration defaults to true since retire_id is only called in the + watchdog thread, which is currently a place we call cuda APIs which may hang, + but care should be taken to avoid computing duration in any function that must + never hang. (timing must also be enabled for compute_duration - see + TORCH_NCCL_ENABLE_TIMING). 
+ */ + void retire_id(std::optional id, bool compute_duration = true); + + const c10::List getCollectiveTrace( + bool includeStacktraces, + bool onlyActive); + + // dump pg_entries + const c10::Dict getPgConfig(); + + const std::map> + getPgConfigJson(); + + // dump pg_status + const c10::Dict getPgStatus(); + + const std::map> + getPgStatusJson(); + + std::string dump_json( + const std::optional>>& ncclDumpMap, + bool includeCollectives, + bool onlyActive); + + // dump all collectives + ncclDumpMap + std::string dump( + const std::optional>>& ncclDumpMap, + bool includeCollectives, + bool includeStackTraces, + bool onlyActive); +}; +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..acf8c9c354a76b8c8a28f25c8a24854acea0e560 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroup.hpp @@ -0,0 +1,748 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +// ************************************************************************* +// PROCESS GROUP collective communication API IS BEING CHANGED BETWEEN +// versions 1.7 and 1.8. +// PLEASE DO NOT ADD ANY DEPENDENCIES. +// SEE RFC: https://github.com/pytorch/pytorch/issues/39662 +// ************************************************************************* + +constexpr auto kProcessGroupDefaultTimeout = + std::chrono::milliseconds(30 * 60 * 1000); + +namespace c10d { + +// ProcessGroup is a base class that captures collective and point to +// point communication in a fixed set of processes. +// +// The functions specified in the class below describe the API alone; +// implementations are provided in subclasses. 
+// +// Every function that performs I/O is executed asynchronously by a +// thread pool owned by the ProcessGroup (by default). They return an +// object that can be used to wait for completion or error. +// +// The ProcessGroup can instantiate subgroups with fewer or an equal +// number of members. Implementations must take care that multiple +// process groups can be used in parallel and synchronize accordingly. +// +// The ProcessGroup assumes a fixed set of processes. If the set +// changes, existing instances must be destructed and instantiation +// and initialization must start from scratch. For members of the +// process group to find each other (referred to as rendezvous from +// hereon) +// +class TORCH_API ProcessGroup : public torch::CustomClassHolder { + public: + // ProcessGroup Options is a base struct that defines the basic options + // when constructing a ProcessGroup. Each ProcessGroup subclass should + // extend this struct and define its options if it wants to provide more + // config options (beyond basic ones defined here) to end user. 
+ struct TORCH_API Options : torch::CustomClassHolder { + explicit Options( + std::string backend, + std::chrono::milliseconds timeout = kProcessGroupDefaultTimeout) + : timeout(timeout), backend(std::move(backend)) {} + ~Options() override = default; + + std::chrono::milliseconds timeout; + + // backend name + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const std::string backend; + }; + + enum BackendType : uint8_t { + UNDEFINED = 0, + GLOO = 1, + NCCL = 2, + UCC = 3, + MPI = 4, + CUSTOM = 5, + }; + + // Not used, set for backwards compatibility and only used for TypeDef in + // Ops.cpp + explicit ProcessGroup(int rank, int size); + + explicit ProcessGroup( + const c10::intrusive_ptr<::c10d::Store>& store, + int rank, + int size, + c10::intrusive_ptr options); + ~ProcessGroup() override; + + int getRank() const { + return rank_; + } + + int getSize() const { + return size_; + } + + // Returns an unique opaque ID of this process group object. + int64_t getID() const { + return reinterpret_cast(this); + } + + // Returns an unique opaque ID of a backend for the specific backend type + // that can correlate with this process group's collectives. 
+ int64_t getBackendID(BackendType backend_type) const { + return reinterpret_cast(getBackend(backend_type).get()); + } + + virtual const std::string getBackendName() const { + return options_->backend; + }; + + BackendType getBackendType() const { + return backendType_; + }; + + virtual void startCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented startCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + backend->startCoalescing(); + } + + virtual c10::intrusive_ptr endCoalescing(c10::DeviceType deviceType) { + // only nccl has implemented endCoalescing so only execute for nccl + // backends + auto backend = getBackend(deviceType); + auto work = backend->endCoalescing(); + return work; + } + + virtual c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::broadcast_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t, + bool, + int64_t)>(); + // It's awakward to unbox the opts here and box them again in the custom C++ + // op. But it's also complicated to make opts as a CustomClassHolder. Leave + // it as it is now. 
+ return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.rootTensor, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + const std::optional& sparse_indices, + int64_t)>(); + + return std::get<1>(op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.sparseIndices, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = AllreduceCoalescedOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allreduce_coalesced_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive(opts.reduceOp), + opts.rootRank, + opts.rootTensor, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& 
inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_", "") + .typed>, + c10::intrusive_ptr>( + const std::vector>&, + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + // Gathers a single tensor inputBuffer into a single buffer outputBuffer that + // is interpreted as a contiguous collection of size inputBuffer * WORLD_SIZE. + // For implementers of ProcessGroup API and advanced users only. + // Note: this function will be deprecated in near future. + virtual c10::intrusive_ptr _allgather_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_allgather_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + bool, + int64_t)>(); + + return std::get<1>(op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is deprecated and will be moved out of ProcessGroup to comms: + // * do not add dependencies on this function, + // * do not implement it in your ProcessGroup, implement _allgather_base + // instead. 
+ virtual c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_coalesced_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensorLists, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + // This function is a coalesced version of `allgather_into_tensor` (currently + // still named as `_allgather_base`). Each tensor in the vector corresponds to + // an input/output of one `allgather_into_tensor` operation. + virtual c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::allgather_into_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this)); + } + + virtual c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::gather_", "") + .typed( + const std::vector>&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + 
.findSchemaOrThrow("c10d::scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + bool, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.rootRank, + opts.asyncOp, + opts.timeout.count())); + } + + virtual c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const std::vector>&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count())); + } + + virtual c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::_reduce_scatter_base_", "") + .typed>( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + bool, + int64_t)>(); + return std::get<1>(op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.asyncOp, + opts.timeout.count())); + } + + // This function is a coalesced version of `reduce_scatter_tensor` (currently + // still named as `_reduce_scatter_base`). Each tensor in the vector + // corresponds to an input/output of one `reduce_scatter_tensor` operation. 
+ virtual c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::reduce_scatter_tensor_coalesced_", "") + .typed( + const at::TensorList, + const at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const c10::intrusive_ptr<::c10d::ReduceOp>&, + int64_t)>(); + + return op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + c10::make_intrusive<::c10d::ReduceOp>(opts.reduceOp), + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_base_", "") + .typed( + at::Tensor&, + at::Tensor&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + std::vector, + std::vector, + int64_t)>(); + return op.call( + outputBuffer, + inputBuffer, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + outputSplitSizes, + inputSplitSizes, + opts.timeout.count()); + } + + virtual c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::alltoall_", "") + .typed< + std::tuple, c10::intrusive_ptr>( + const at::TensorList&, + const at::TensorList&, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return std::get<1>(op.call( + outputTensors, + inputTensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.timeout.count())); + } + + virtual void monitoredBarrier( + const BarrierOptions& opts, + bool wait_all_ranks = false) { + static auto op = 
c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::monitored_barrier_", "") + .typed&, + const std::vector&, + int64_t, + bool)>(); + // Default to using cpu implementation, monitored barrier is only for GLOO + at::Tensor tensor = at::empty({0}, at::TensorOptions().device(at::kCPU)); + op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count(), + wait_all_ranks); + } + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. Only implemented + // for GLOO and NCCL backends currently. + virtual void setSequenceNumberForGroup() { + auto backendType = getBackendType(); + // TODO: HACK for backend name to get sequence number for that backend. + if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + getDefaultBackend()->setSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + virtual uint64_t getSequenceNumberForGroup() { + auto backendType = getBackendType(); + + // TODO: HACK for backend name to get sequence number for that backend. 
+ if (backendType == ProcessGroup::BackendType::GLOO || + backendType == ProcessGroup::BackendType::NCCL || + backendType == ProcessGroup::BackendType::UCC) { + return getDefaultBackend()->getSequenceNumberForGroup(); + } else { + TORCH_CHECK( + false, + c10::str( + "ProcessGroup ", + getBackendName(), + " does not yet support sequence numbers.")); + } + } + + virtual c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::send", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + dstRank, + tag); + } + + virtual c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + srcRank, + tag); + } + + virtual c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::recv_any_source_", "") + .typed( + at::TensorList, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + int64_t)>(); + return op.call( + tensors, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + tag); + } + + virtual c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) { + static at::Tensor tensor; + // TODO: if nccl was specified then use it + auto device = opts.device; + if (device.has_value()) { + // set device tensor from argument + tensor = at::empty( + {1}, at::TensorOptions().device(device.value()).dtype(at::kByte)); + } else if (backendType_ == c10d::ProcessGroup::BackendType::NCCL) { + // set cuda tensor + tensor = at::empty( + {1}, + 
at::TensorOptions().device(at::DeviceType::CUDA).dtype(at::kByte)); + } else { + // Default to using cpu implementation + tensor = at::empty( + {1}, + at::TensorOptions().device(at::DeviceType::CPU).dtype(at::kByte)); + } + + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("c10d::barrier", "") + .typed( + at::Tensor, + const c10::intrusive_ptr<::c10d::ProcessGroup>&, + const std::vector&, + int64_t)>(); + + return op.call( + tensor, + c10::intrusive_ptr::unsafe_reclaim_from_nonowning(this), + opts.device_ids, + opts.timeout.count()); + } + + c10::intrusive_ptr getOptions() { + return options_; + } + + bool hasBackends() { + return !deviceTypeToBackendType_.empty(); + } + + void setBackend( + c10::DeviceType deviceType, + BackendType backendType, + const std::optional>& backend) { + // TODO: should we add these entries after the backend setting succeeds? + deviceTypeToBackendType_[deviceType] = backendType; + deviceTypes_.insert(deviceType); + // if the backendType is already set then reuse it for this device + if (backendTypeToBackend_.find(backendType) != + backendTypeToBackend_.end()) { + auto existingBackend = backendTypeToBackend_.at(backendType); + deviceTypeToBackend_[deviceType] = existingBackend; + TORCH_CHECK( + existingBackend->getBoundDeviceId() == + (*backend)->getBoundDeviceId()); + } else { + // check if backend has value + if (backend.has_value()) { + deviceTypeToBackend_[deviceType] = backend.value(); + backendTypeToBackend_[backendType] = backend.value(); + (*backend)->setBoundDeviceId(bound_device_id_); + } + } + } + + c10::intrusive_ptr getDefaultBackend() const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType_) != backendTypeToBackend_.end(), + "Could not find the default backend type ", + backendType_, + " for Process Group with name ", + getBackendName(), + "."); + return backendTypeToBackend_.at(backendType_); + } + + c10::intrusive_ptr getBackend(c10::DeviceType deviceType); + + c10::intrusive_ptr 
getBackend(BackendType backendType) const { + TORCH_CHECK( + backendTypeToBackend_.find(backendType) != backendTypeToBackend_.end(), + "Could not find backend type ", + backendType, + "."); + return backendTypeToBackend_.at(backendType); + } + + // Return device types supported by this ProcessGroup. + // Note: the return type is `Device` rather than `DeviceType` for the purpose + // of easy comparison at Python level. The `Device` will have default index + // (-1). + std::vector getDeviceTypes() const { + std::vector devices; + devices.reserve(deviceTypes_.size()); + for (auto& dt : deviceTypes_) { + devices.emplace_back(dt); + } + return devices; + } + + void registerOnCompletionHook( + std::function)>&& hook) { + getDefaultBackend()->registerOnCompletionHook(std::move(hook)); + } + + void waitForPendingWorks() { + getDefaultBackend()->waitForPendingWorks(); + } + + bool hasHooks() const { + return getDefaultBackend()->hasHooks(); + } + + const std::string& getGroupName() const; + void setGroupName(const std::string& name); + const std::string& getGroupDesc() const; + void setGroupDesc(const std::string& name); + void enableCollectivesTiming(); + + void release_resources() override; + + // ProcessGroups optionally can be "bound" to a specific device. + // Currently this is only for nccl and allows for some opt-in + // optimizations such as automatic use of ncclCommSplit. The device + // is specified in `init_process_group` and eventually makes it + // here and then down into the actual backend instances. + std::optional getBoundDeviceId() const { + return bound_device_id_; + } + + void setBoundDeviceId(std::optional device) { + if (device) { + TORCH_CHECK(device->has_index(), "setBoundDeviceId must have an index"); + } + bound_device_id_ = device; + } + + protected: + // Implementations of this interface need to call this to setup + // appropriate logging etc. 
+ void init(); + + c10::intrusive_ptr store_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const int rank_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const int size_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const c10::intrusive_ptr options_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const BackendType backendType_; + std::string pg_desc_; + + // Debug level setting. It is parsed once when ProcessGroup is constructed and + // remains the same across use of this process group. + DebugLevel dist_debug_level_{DebugLevel::Off}; + + // Backend classes for this ProcessGroup + std::unordered_set deviceTypes_; + std::unordered_map deviceTypeToBackendType_; + std::unordered_map> + deviceTypeToBackend_; + std::unordered_map> + backendTypeToBackend_; + + std::optional bound_device_id_; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp new file mode 100644 index 0000000000000000000000000000000000000000..9f1e63d58adf2d2858271bb3e80c3d5cf86739ea --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -0,0 +1,448 @@ +#pragma once + +#ifdef USE_C10D_GLOO + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace c10d { + +constexpr const char* GLOO_BACKEND_NAME = "gloo"; + +// ProcessGroupGloo implements Gloo bindings for c10d. +// +// All functions on this class are expected to be called in the same +// order across processes in the group. This is the only way that we +// can guarantee to match up the same calls across processes. 
For +// multi-threaded usage of process groups, you can use consider using +// multiple process group instances. +// +// The Gloo algorithms that this class calls into are cached by their +// signature (see description of AlgorithmKey above). This cache works +// as follows: every function call instantiates an AlgorithmKey and +// looks in the cache for existing entries. If there is one, it is +// removed from the cache and returned to the caller. If there are +// none, a new entry is created and returned. If an entry was created +// before, but is still in use, the call will block and wait until the +// entry is returned to the cache. +// +// In the future, we hope to extend this to allow multiple entries per +// key, to enable parallelism for a single key. The number of entries +// per key must always be identical for all processes. This maximum +// number can be automatically tuned, but only if we let a single +// process take charge, and have it broadcast the limits. +// +class TORCH_API ProcessGroupGloo : public Backend { + public: + // AsyncWork is the Gloo specific superclass for asynchronous work items. + // We can split asynchronous work into 3 phases: + // 1) Sanity checks and prepare input (e.g. memcpy) + // 2) Run operation on background thread + // 3) Synchronize with completion on foreground thread + // + // There is state to be shared between these 3 phases and all of this state + // is captured in the AsyncWork class and its derivatives. + // + // Note: while we are porting operations to use new style collectives, there + // is a split between operations using the existing caching approach and + // operations using the new AsyncWork base class. Over time we will port + // all operations and perform needed cleanup. + // + // FIXME: This probably should be called WorkGloo since the work is executed + // in sync mode by a background thread. 
+ class TORCH_API AsyncWork : public Work { + public: + explicit AsyncWork( + std::vector> outputTensors, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const std::optional>& inputTensors = + std::nullopt); + + ~AsyncWork() override = default; + + static void execute(const c10::intrusive_ptr& work); + + virtual void run() = 0; + + std::vector result() override; + + c10::intrusive_ptr getFuture() override; + uint64_t getSequencenumber() const override; + + protected: + friend class ProcessGroupGloo; + + private: + void finishWorkGloo(); + void finishWorkGlooError(const std::exception_ptr& eptr); + inline void recordAsyncWorkProfilingInfo( + const char* profilingTitle, + const std::optional>& inputTensors); + + const std::vector> outputTensors_; + c10::intrusive_ptr future_; + std::function recordFunctionBeforeCallback_; + const uint64_t seq_; + }; + + // Wrap c10d store as Gloo store + class TORCH_API GlooStore : public ::gloo::rendezvous::Store { + public: + GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {} + + void setUint(const std::string& key, const std::vector& value) { + store_->set(key, value); + } + + void set(const std::string& key, const std::vector& value) override { + std::vector tmp(value.begin(), value.end()); + store_->set(key, tmp); + } + + std::vector getUint(const std::string& key) { + auto value = store_->get(key); + return value; + } + + std::vector get(const std::string& key) override { + auto value = store_->get(key); + return std::vector(value.begin(), value.end()); + } + + void wait(const std::vector& keys) override { + store_->wait(keys, ::c10d::Store::kDefaultTimeout); + } + + void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) override { + store_->wait(keys, timeout); + } + +#ifdef GLOO_STORE_HAS_STORE_V2 + bool has_v2_support() override { + return store_->hasExtendedApi(); + } + + std::vector> multi_get( + const std::vector& keys) override { + 
std::vector> res; + for (auto& value : store_->multiGet(keys)) { + res.emplace_back(value.begin(), value.end()); + } + return res; + } + + void multi_set( + const std::vector& keys, + const std::vector>& values) override { + std::vector> u_values; + u_values.reserve(values.size()); + for (auto& value : values) { + u_values.emplace_back(value.begin(), value.end()); + } + store_->multiSet(keys, u_values); + } + + void append(const std::string& key, const std::vector& value) + override { + std::vector tmp(value.begin(), value.end()); + return store_->append(key, tmp); + } + + int64_t add(const std::string& key, int64_t value) override { + return store_->add(key, value); + } +#endif + + protected: + c10::intrusive_ptr<::c10d::Store> store_; + }; + + // For send and recv operations there is no need to pass them to the + // thread pool as they are entirely completed by the device thread. + // This work object is used to synchronize completion of the send or + // recv operation. It keeps a reference to the tensor it is + // operating on to prevent it from being deallocated while the + // operation is still in flight. 
+ class TORCH_API SendWork : public Work { + public: + explicit SendWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + uint64_t seq); + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + const uint64_t seq_; + }; + + class TORCH_API RecvWork : public Work { + public: + explicit RecvWork( + at::Tensor& tensor, + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr); + + int sourceRank() const override; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + uint64_t getSequencenumber() const override; + + protected: + at::Tensor tensor_; + std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_; + int srcRank_; + const uint64_t seq_; + }; + + struct TORCH_API Options : public Backend::Options { + explicit Options( + std::chrono::milliseconds timeout = kBackendDefaultTimeout); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + std::chrono::milliseconds timeout = kBackendDefaultTimeout) { + return c10::make_intrusive(timeout); + } + + std::vector> devices; + int threads; + }; + + const std::string getBackendName() const override { + return std::string(GLOO_BACKEND_NAME); + } + + // Helper functions to create a new device object. + // They are static functions on this class to keep them logically + // separate from the rest of the code base (e.g. torch/csrc/distributed). + + // Create new device instance for specific interface. + static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface( + const std::string& interface); + + // Create new device instance for specific hostname or address. 
+ static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname( + const std::string& hostname); + + // Create new device instance. + // It tries to resolve this machine's hostname and bind to that address. + // If that fails (i.e. the hostname doesn't resolve to an address), it + // falls back to binding to the loopback address. + static std::shared_ptr<::gloo::transport::Device> createDefaultDevice(); + + // Create ProcessGroupGloo instance. + static c10::intrusive_ptr createProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + std::chrono::milliseconds timeout); + + explicit ProcessGroupGloo( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + ~ProcessGroupGloo() override; + + c10::intrusive_ptr getOptions() { + return options_; + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& output_tensor, + at::Tensor& input_tensor, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + 
std::vector>& output_lists, + std::vector& input_list, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputs, + std::vector& inputs, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputs, + std::vector>& inputs, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputs, + std::vector>& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputCounts, + std::vector& inputCounts, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + void enableCollectivesTiming() override; + + const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const { + return store_; + } + + // Similar to barrier(), but blocks rank 0 until all other ranks have + // acknowledged that they are alive (through send/recv from rank 0). Rank 0 + // is able to report all failed ranks if waitAllRanks = true, otherwise + // reports the first rank it detected as failed. 
+ void monitoredBarrier( + const BarrierOptions& opts = BarrierOptions(), + bool waitAllRanks = false) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. + void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + int getNumThreads() { + return options_->threads; + } + + protected: + std::unique_ptr<::gloo::rendezvous::Store> store_; + const c10::intrusive_ptr options_; + + // Every Gloo context represents a set of connections to its peers. + // In order to use more than one device (or allow for parallelism on + // a single device), you need multiple contexts. + std::vector> contexts_; + std::vector threads_; + bool stop_; + + // Incremented for every collective we kick off. + // The value is used as tag for collective operations. Collectives are kicked + // off in identical order across processes. Therefore the tag can be used + // to match up operations during concurrent execution. + uint32_t collectiveCounter_; + + // Returns next collective tag to use (uses collectiveCounter_). + uint32_t nextTag(); + + // Returns the context to use for the specified tag. + // With `nextTag` returning an increasing number, this should lead + // to contexts being used in a round-robin fashion. + std::shared_ptr<::gloo::Context> getContext(uint32_t tag); + + // Entrypoint for worker threads. + void runLoop(int workerIndex); + + // Queue work to run on worker thread. + void enqueue(c10::intrusive_ptr work); + + // Keep both a queue of pending work, and a vector with in progress work. + // Both of these can only be mutated when holding the queue lock. 
+ // We keep both around instead of just the queue, so we can grab a weak_ptr + // to all in progress and pending work when executing a barrier. + // When executing a barrier, we need to ensure that all prior work + // has completed before completing itself. + std::deque> workQueue_; + std::vector> workInProgress_; + std::mutex workMutex_; + std::condition_variable workProduceCV_; + std::condition_variable workConsumeCV_; + uint64_t seq_{0}; +}; + +} // namespace c10d + +#endif // USE_C10D_GLOO diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp new file mode 100644 index 0000000000000000000000000000000000000000..5eb06b7395570ecb0d670f52be9ec09fbaa25ab3 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp @@ -0,0 +1,271 @@ +#pragma once + +#ifdef USE_C10D_MPI + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include + +namespace c10d { + +constexpr const char* MPI_BACKEND_NAME = "mpi"; + +// WorkEntry is the state associated with a single MPI run instance. +// It include the source Tensor list and destination Tensor list, as well as +// The actual run function that will operate either on src or dst or both. +struct WorkEntry { + explicit WorkEntry( + std::vector* srcPtr, + std::vector* dstPtr, + std::function&)> run) + : dst(dstPtr ? *dstPtr : std::vector()), run(std::move(run)) { + if (srcPtr) { + src = *srcPtr; + } + } + + // Not copyable + WorkEntry(const WorkEntry&) = delete; + // Not copy assignable + WorkEntry& operator=(const WorkEntry&) = delete; + + // For input and output tensors (in-place), we will always use src + std::vector src; + + // Copy of user provided outputs. 
+ const std::vector dst; + + // src rank returned, for recv only + int* srcRank = nullptr; + std::function&)> run; +}; + +// ProcessGroupMPI implements MPI bindings for c10d. +// +// All functions on this class are expected to be called in the same +// order across processes in the group. This is the only way that we +// can guarantee to match up the same calls across processes. +// +// All MPI functions provided by this class is asynchronously scheduled on a +// Worker thread. Therefore, ProcessGroupMPI requires the MPI implementation +// that is used to have a minimum thread support value of MPI_THREAD_SERIALIZED. +// That is, The process may be multi-threaded, and multiple threads may make +// MPI calls, but only one at a time: MPI calls are not made concurrently from +// two distinct threads (all MPI calls are serialized). However, with +// MPI_THREAD_SERIALIZED, ProcessGroupMPI will only support a singe process +// group. In other words, no more than 1 process group can be created globally. +// +// If you would like to use multiple ProcessGroupMPI, it requires your MPI +// implementation to have a thread support value of MPI_THREAD_MULTIPLE, that +// is, multiple threads may call MPI, with no restriction. +// +// Also note that ProcessGroupMPI only supports a single Tensor operation. In +// other words, the size of the input Tensor vector should always be 1. +// +// CUDA tensor can be supported if the MPI used is CUDA-aware MPI, and +// ProcessGroupMPI will automatically detect this support. 
+class TORCH_API ProcessGroupMPI : public Backend { + public: + class WorkMPI : public Work { + public: + explicit WorkMPI( + std::vector outputTensors, + const char* profilingTitle = nullptr, + const std::optional>& inputTensors = + std::nullopt) + : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), + outputTensors_(std::move(outputTensors)), + future_(c10::make_intrusive( + c10::ListType::create(c10::TensorType::get()))) {} + + std::vector result() override; + + c10::intrusive_ptr getFuture() override; + + protected: + friend class ProcessGroupMPI; + + private: + void finishWorkMPI(); + void finishWorkMPIError(const std::exception_ptr& eptr); + + std::vector outputTensors_; + c10::intrusive_ptr future_; + }; + + class AsyncWork : public Work { + public: + AsyncWork( + MPI_Request request, + std::vector outputTensors, + const char* profilingTitle = nullptr, + const std::optional>& inputTensors = + std::nullopt); + + ~AsyncWork() override; + + bool isCompleted() override; + + bool isSuccess() const override; + + int sourceRank() const override; + + bool wait(std::chrono::milliseconds timeout = kUnsetTimeout) override; + + void abort() override; + + std::vector result() override; + + protected: + void populateException(); + + private: + const std::vector outputTensors_; + MPI_Request request_; + MPI_Status status_{}; + }; + + // Constructor will spawn up the worker thread loop + explicit ProcessGroupMPI(int rank, int size, MPI_Comm pgComm); + + ~ProcessGroupMPI() override; + + // Abort the MPI program, needs to be called when exception is detected + void abort(); + + const std::string getBackendName() const override { + return std::string(MPI_BACKEND_NAME); + } + + c10::intrusive_ptr broadcast( + std::vector& data, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& 
tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputbuffer, + at::Tensor& inputbuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + c10::intrusive_ptr recvAnysource( + std::vector& tensor, + int tag) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + // Creating a new ProcessGroupMPI, will initialize MPI if not initialized + static c10::intrusive_ptr 
createProcessGroupMPI( + std::vector ranks = {}); + + protected: + using WorkType = + std::tuple, c10::intrusive_ptr>; + // Worker thread loop + void runLoop(); + // Helper function that is called by the destructor + void destroy(); + + c10::intrusive_ptr enqueue( + std::unique_ptr entry, + const char* profilingTitle = nullptr, + const std::optional>& inputTensors = + std::nullopt); + + bool stop_; + + std::mutex pgMutex_; + std::thread workerThread_; + + std::deque queue_; + std::condition_variable queueProduceCV_; + std::condition_variable queueConsumeCV_; + + // Global states + static void initMPIOnce(); + static void mpiExit(); + static c10::once_flag onceFlagInitMPI; + + static std::mutex pgGlobalMutex_; + static int mpiThreadSupport_; + + MPI_Comm pgComm_; +}; + +} // namespace c10d + +#endif // USE_C10D_MPI diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp new file mode 100644 index 0000000000000000000000000000000000000000..10a7d20947433a7a30ca04946777e04702e3b92d --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -0,0 +1,1232 @@ +#pragma once + +#ifdef USE_C10D_NCCL + +#if defined(__linux__) +#include +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace c10d { + +// Control broadcasting of NCCL uniqueId +static std::vector TORCH_NCCL_BCAST_UNIQUEID = { + "TORCH_NCCL_BCAST_UNIQUEID"}; + +// Control whether to always use high priority streams +static std::vector TORCH_NCCL_HIGH_PRIORITY = { + "TORCH_NCCL_HIGH_PRIORITY"}; + +// Control whether or not wait() is blocking or non-blocking. 
+static std::vector TORCH_NCCL_BLOCKING_WAIT = { + "TORCH_NCCL_BLOCKING_WAIT", + "NCCL_BLOCKING_WAIT"}; + +// TODO: We want to eventually remove this variable and make users to use +// the default value (3 - SkipCleanUp). +// Control whether or not we perform Async Error Handling with NCCL. +static std::vector TORCH_NCCL_ASYNC_ERROR_HANDLING = { + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + "NCCL_ASYNC_ERROR_HANDLING"}; + +// Control whether dumping debug info on watchdog +// timeout is enabled. This variable must be set together with +// TORCH_NCCL_ENABLE_MONITORING=1 and TORCH_NCCL_TRACE_BUFFER_SIZE > 0. +static std::vector TORCH_NCCL_DUMP_ON_TIMEOUT = { + "TORCH_NCCL_DUMP_ON_TIMEOUT"}; + +// Control whether Desync Debug is enabled. This variable must be set +// together with TORCH_NCCL_ASYNC_ERROR_HANDLING. +static std::vector TORCH_NCCL_DESYNC_DEBUG = { + "TORCH_NCCL_DESYNC_DEBUG", + "NCCL_DESYNC_DEBUG"}; + +// Enable recording start-events for all ProcessGroupNCCL collectives, and +// compute accurate collective timing per-collective. (Note: end-events are +// recorded by default. Turn on this flag can increase chances of a watchdog +// hang due to performing a CUDA event query which eventually calls +// cudaEventElapsedTime() API. +static std::vector TORCH_NCCL_ENABLE_TIMING = { + "TORCH_NCCL_ENABLE_TIMING", + "NCCL_ENABLE_TIMING"}; + +// Enable monitoring thread which aborts the process when the ProcessGroupNCCL +// Watchdog thread gets stuck and no heartbeat is detected after +// TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC. This can happen due to calling CUDA/NCCL +// APIs that may hang. It is Useful to prevent jobs being stuck for a prolonged +// time than necessary tying up cluster resources. +static std::vector TORCH_NCCL_ENABLE_MONITORING = { + "TORCH_NCCL_ENABLE_MONITORING"}; + +// Control the watchdog heartbeat timeout period after which the monitoring +// thread will abort the process. 
+static std::vector TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC = { + "TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"}; + +// Whether to rethrow CUDA Errors in the watchdog (default true) +static std::vector TORCH_NCCL_RETHROW_CUDA_ERRORS = { + "TORCH_NCCL_RETHROW_CUDA_ERRORS"}; + +// The maximum number of events we store in the flight recorder's ring buffer. +// (One event could be the start or end of a collective, for example). +static std::vector TORCH_NCCL_TRACE_BUFFER_SIZE = { + "TORCH_NCCL_TRACE_BUFFER_SIZE"}; + +// Control how much extra time we will wait for dumping the debugging info +// before we exit and throws timeout exception. +static std::vector TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC = { + "TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC"}; + +// Control the interval inside the monitoring thread to check the coordinated +// signal from other ranks, e.g. to dump the debugging information. +static std::vector TORCH_NCCL_COORD_CHECK_MILSEC = { + "TORCH_NCCL_COORD_CHECK_MILSEC"}; + +// Whether to log C++ stack traces on unclean shutdown (default true) +static std::vector TORCH_NCCL_LOG_CPP_STACK_ON_UNCLEAN_SHUTDOWN = { + "TORCH_NCCL_LOG_CPP_STACK_ON_UNCLEAN_SHUTDOWN"}; + +// Control whether to use CudaEventCache for the collective in watchdog thread. +// We noticed in the past when cuda global lock is held, destroying CudaEvent +// can cause a hang. 
+static std::vector TORCH_NCCL_CUDA_EVENT_CACHE = { + "TORCH_NCCL_CUDA_EVENT_CACHE"}; + +static std::vector TORCH_NCCL_NAN_CHECK = {"TORCH_NCCL_NAN_CHECK"}; + +constexpr const char* NCCL_BACKEND_NAME = "nccl"; + +constexpr const char* EXCEPTION_DUMP = "exception_dump"; + +constexpr const int kWorkStatusUpdatePeriodMs = 30 * 1000; // 30 seconds + +constexpr auto kProcessGroupNCCLDefaultTimeout = + std::chrono::milliseconds(10 * 60 * 1000); + +// NoHandling: do not handle asynchronous NCCL errors +// TearDown: tear down process upon error, see `WorkNCCL::handleException` +// CleanUpOnly: just clean up collectives and abort communicators without +// tearing down process SkipCleanUp: (this is a temporary option and can be +// removed in future) tear down process without cleaning up NCCL communicators. +// This should be used as a last resort in case `ncclCommAbort` itself is +// hanging +enum ErrorHandlingMode { + NoHandling = 0, + TearDown = 1, + CleanUpOnly = 2, + SkipCleanUp = 3 +}; + +#define SHOULD_CLEAN_UP(a) (a != NoHandling && a != SkipCleanUp) + +#define SHOULD_TEAR_DOWN(a) (a != NoHandling && a != CleanUpOnly) + +#define PRINT_COLLECTIVE_HASH_SIGNATURE(phase, opType, numel, hashValue) \ + LOG(WARNING) << logPrefix() << "Hash of " << phase << " to NCCL " << opType \ + << " with size " << numel << " is " << hashValue; + +// If set, ProcessGroupNCCL doesn't use recordStream calls to ensure +// caching allocator safety for tensors used on both user-facing and +// internal comm streams. +// Instead, it stashes live references to those tensors until after +// user-facing streams are synced with comm streams. +// See stashed_for_allocator_safety_ below. 
+static std::vector TORCH_NCCL_AVOID_RECORD_STREAMS = { + "TORCH_NCCL_AVOID_RECORD_STREAMS"}; + +// If set, ProcessGroupNCCL registers postAlloc and preFree hooks to cuda cache +// allocator so that whenever a tensor is allocated or freed, ProcessGroupNCCL +// can register/deregister the tensor on all available NCCL communicators. +static std::vector TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK = + {"TORCH_NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK", + "NCCL_USE_TENSOR_REGISTER_ALLOCATOR_HOOK"}; + +#if defined(__linux__) +struct DumpPipe { + DumpPipe(int rank) { + std::string fileStem = + getCvarString({"TORCH_NCCL_DEBUG_INFO_PIPE_FILE"}, ""); + if (fileStem.empty() || + getCvarInt({"TORCH_NCCL_TRACE_BUFFER_SIZE"}, 0) <= 0) { + return; + } + TORCH_CHECK(!fileStem.empty(), "TORCH_NCCL_DEBUG_INFO_TEMP_FILE is empty"); + std::string filename = c10::str(fileStem, rank, ".pipe"); + TORCH_CHECK( + unlink(filename.c_str()) != -1 || errno == ENOENT, + "Error removing existing named pipe ", + filename); + TORCH_CHECK( + mkfifo(filename.c_str(), 0666) != -1, + "Error creating named pipe ", + filename); + fd_ = open(filename.c_str(), O_RDONLY | O_NONBLOCK); + LOG(INFO) << "Pipe file " << filename + << " has been opened, write to it to trigger NCCL Debug Dump."; + TORCH_CHECK(fd_ != -1, "Error opening named pipe ", filename); + } + bool shouldDump() { + if (fd_ == -1) { + return false; + } + char buf[128]; + // non-blocking from O_NONBLOCK above. + // Ignore EINTR because we already will poll this + // again later. + ssize_t bytesRead = read(fd_, &buf, 128); + return bytesRead > 0; + } + ~DumpPipe() { + if (fd_ != -1) { + close(fd_); + } + } + + private: + int fd_ = -1; +}; +#else +struct DumpPipe { + DumpPipe(int rank) {} + bool shouldDump() { + return false; + } +}; +#endif + +// ProcessGroupNCCL implements NCCL bindings for c10d. +// +// All functions of the class are expected to be called in the same order +// across all processes in the process group. 
This is the only way that we +// can guarantee to match up the same calls among all processes. +// +// All NCCL functions provided by this class are asynchronous functions. More +// specifically, each NCCL call is scheduled on a separate CUDA stream that is +// different from the current CUDA stream. This is for the purpose of +// achieving potentially concurrency and better performance. As a result, +// it is the callers' responsibility to make sure that the CUDA stream their +// code works on needs to wait for the NCCL operation from +// this class. +// +// This can be done by calling: +// +// either WorkNCCL::wait() or WorkNCCL::synchronize(), both achieves the same +// functionality and are synonyms. +// +// Also note that WorkNCCL::finishedGPUExecution() is a helper function only +// provided by ProcessGroupNCCL to check if the NCCL operation of WorkNCCL has +// finished execution on the GPU (not just scheduled). +// +// Example on using the NCCL process group +// +// ProcessGroupNCCL pg(store, rank, size); +// std::shared_ptr work = pg.allreduce(tensors); +// +// // At this point, NCCL kernel has already by queued successfully +// // Now, let current stream wait for the NCCL to finish, this function is +// // async operation as well +// +// work->wait() +// +// // Now continue on other work in the current stream. +class TORCH_API ProcessGroupNCCL : public Backend { + public: + class WorkNCCL : public Work, public std::enable_shared_from_this { + public: + friend struct WorkInfo; + + // Constructor takes a list of CUDA devices + WorkNCCL( + const std::string& pgUID, + const std::string& pgDesc, + at::Device& device, + int rank, + OpType opType, + uint64_t seq, + const char* profilingTitle = nullptr, + const std::optional>& inputs = std::nullopt, + bool desyncDebug = false, + bool enableTiming = false, + bool cudaEventCacheEnabled = false, + DebugLevel distDebugLevel = DebugLevel::Off); + // Copy constructor doing partial copy without outputs_. 
Cleanup thread + // monitors and removes finished works. However it will deadlock when + // destructs outputs_ tensors who are view tensors in autograd graph. + WorkNCCL(const WorkNCCL& w); + + ~WorkNCCL() override; + + // Checks if the NCCL kernel has started to execute. + bool isStarted(); + + // Checks if request has completed. In this specific case of NCCL, it checks + // if the NCCL operation has completed on the GPU in its own NCCL stream. + // Non-blocking operation. + bool isCompleted() override; + + bool isSuccess() const override; + + // Same as calling synchronize() for NCCL work. + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override; + + void abort() override; + + // Let current stream wait on the completing of the NCCL work + // Throws on exceptions. Blocking operation, which will wait for work + // completion. + void synchronize() override; + + // Synchronize streams by blocking each on the NCCL stream + void synchronizeStream(); + + // Helper function to handle exception (throw if needed). + void handleException(ErrorHandlingMode asyncErrorHandling); + + // Helper function that checks if the NCCL kernels have finished + // execution on the GPUs + bool finishedGPUExecution(); + + // Get a Future object that will be marked as completed internally. + c10::intrusive_ptr getFuture() override; + + float getDuration() const override; + + uint64_t getSequencenumber() const override; + + const std::string& logPrefix() const; + + // Helper function that sets an exception_ptr on the WorkNCCL object. + void setException(std::exception_ptr exception_ptr); + + // Helper function that returns True if the WorkNCCL object has timed out + // and False otherwise. + // In case of timeout, set exception on the WorkNCCL object. 
+ bool checkTimeout( + std::optional timeout = std::nullopt); + + std::vector result() override; + + protected: + // The process group unique id + std::string pgUID_; + + // The process group description + std::string pgDesc_; + + // The cached list of CUDA devices to operate on + at::Device device_; + + // The start CUDA event of NCCL operator tracking this work item. These + // start CUDA events are needed by desync debugging if enabled. + std::shared_ptr ncclStartEvent_; + + // The end CUDA event of NCCL operator tracking this work item. + std::shared_ptr ncclEndEvent_; + + // The NCCL communicator used for this work item. + std::shared_ptr ncclComm_; + + // Tensors used for barrier op + at::Tensor barrierTensor_; + + // Clone of blockingWait_ from ProcessGroupNCCL. + bool blockingWait_ = false; + + // Clone of avoidRecordStreams_ from ProcessGroupNCCL. + bool avoidRecordStreams_ = false; + + // Clone of opTimeout_ from ProcessGroupNCCL. + std::chrono::milliseconds opTimeout_; + + // Ephemeral timeouts are owned by exactly one work, + // and reset after that work completes. + // There may be more than one ephemeral timeout active at the same time, + // and this variable is used to track the ownership of ephemeral timeout. + std::chrono::milliseconds ownedEphermeralTimeout_ = + std::chrono::milliseconds(0); + + // Time point representing when the work started. + std::chrono::time_point workStartTime_; + + // Record the collective sequential number. + uint64_t seq_; + + // Indicates if the nccl start event has been updated to the store trace. + // This will be used by desync debug. + bool startTraceUpdated_{false}; + + // Record collective sizes for debug. We only record the size on the first + // device as multi-device per process is deprecated + size_t numelIn_ = -1; + size_t numelOut_ = -1; + + // Wrapper method for the static checkForNCCLErrors which can be overridden + // for tests. 
+ virtual std::exception_ptr checkForNCCLErrors(); + + friend std::ostream& operator<<( + std::ostream& output, + const WorkNCCL& workNCCL); + + private: + // Helper function for synchronize + void synchronizeInternal(std::chrono::milliseconds timeout); + + // Checks for NCCL errors and sets an appropriate exception_ptr. + void checkAndSetException(); + + // Just checks whether GPU execution has started, without modifying + // exception_ptr. + bool startedGPUExecutionInternal() const; + + // Just checks whether GPU execution has completed, without modifying + // exception_ptr. + bool finishedGPUExecutionInternal() const; + + // Reference to the store so that we can write aborted communicators + // to the store. + c10::intrusive_ptr store_; + + // Store a reference to NCCL collective's outputs, used by result and to + // give a more descriptive message when representing the Work as a string. + std::shared_ptr> outputs_; + + // TORCH_NCCL_AVOID_RECORD_STREAMS implementation helper. + // Stores references to participating non-output tensors (ie inputs, + // flattened intermediates). + // We'll clear this list in synchronizeStream, just after user-facing + // stream(s) are synced with the nccl work stream(s). + // By keeping these refs (as well as outputs_) alive until after the + // collective's work rejoins the user-facing streams, we achieve + // caching allocator safety without any recordStream calls. + // For in-place collectives, some refs stashed here may alias outputs_, + // but that doesn't do any harm. + std::shared_ptr> stashed_for_allocator_safety_; + + // The future returned by getFuture. 
+ c10::intrusive_ptr future_; + + bool timingEnabled_; + // unique id used to tell the trace buffer that this + // work has completed + std::optional trace_id_; + DebugLevel distDebugLevel_; + friend class ProcessGroupNCCL; + }; + + class CUDAEventCache { + public: + CUDAEventCache(); + std::shared_ptr create(bool timing); + static CUDAEventCache& get(); + + private: + std::mutex cacheMutex_; + // NOTE: We intentionaly store raw pointers so that + // we do not attempt to destroy the event objects on process exit, + // because cuda may be gone. + std::vector + eventsArray_[2]; // 0 for timing=false, 1 for timing=true + }; + + struct Options : Backend::Options { + // NOTE: timeout in ProcessGroupNCCL::Options denote the timeout for + // operations. This is only used when blockingWait_ is enabled. + explicit Options(bool is_high_priority_stream = false); + + // return intrusive_ptr of the object + static c10::intrusive_ptr create( + bool is_high_priority_stream = false) { + return c10::make_intrusive(is_high_priority_stream); + } + + // Schedule NCCL operations on high priority CUDA streams + bool is_high_priority_stream; + +#ifdef NCCL_HAS_COMM_NONBLOCKING + // Configure ranks + ncclConfig_t config = NCCL_CONFIG_INITIALIZER; +#endif + + // Optional "parent" backend and color to create communicators from + // via `ncclCommSplit` + std::shared_ptr split_from; + int64_t split_color{0}; + std::vector global_ranks_in_group; + std::string group_name; + }; + + // If you wish to create multiple process groups, each with a potentially + // different rank and size, you can do so by passing a new store instance + // to each one. If you have only a single store object, you can + // use the `c10d::PrefixStore` to derive scoped instances. + // This is also what the Python API in torch.distributed does. + // + // The process group instance keeps a reference to the store because + // it may be used long after the constructor runs. 
In fact, the constructor + // doesn't create any NCCL communicators. A single NCCL communicator can + // only be used on a specific set of devices, and are therefore created + // on-demand when a collective runs. If another collective is executed later, + // against a different set of devices, the process group creates another NCCL + // communicator. These NCCL communicators are cached and reused if possible. + // + ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + c10::intrusive_ptr options = Options::create()); + + // This constructor includes the deprecated `groupName` argument. + // If you have existing code that uses the `groupName`, you can replace + // it by specifying a `c10d::PrefixStore(groupName, store)` for store. + C10_DEPRECATED ProcessGroupNCCL( + const c10::intrusive_ptr& store, + int rank, + int size, + const std::string& groupName, + c10::intrusive_ptr options = Options::create()) + : ProcessGroupNCCL(store, rank, size, options) {} + + ~ProcessGroupNCCL() override; + + // This function returns a local uid for ProcessGroupNCCL. 
+ uint64_t getUid() { + return static_cast(local_id_); + } + + c10::intrusive_ptr getOptions() { + return options_; + } + + const std::string getBackendName() const override { + return std::string(NCCL_BACKEND_NAME); + } + + bool supportsSplitting() const override { + return true; + } + + void startCoalescing() override; + + c10::intrusive_ptr endCoalescing() override; + + // For specifying a composite optype, such as ALLGATHER and REDUCE_SCATTER + c10::intrusive_ptr endCoalescing(OpType optype); + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override; + + c10::intrusive_ptr _broadcast_oop( + at::Tensor& outputTensors, + at::Tensor& inputTensors, + const BroadcastOptions& opts = BroadcastOptions()); + + c10::intrusive_ptr allreduce_sparse( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override; + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override; + + c10::intrusive_ptr reduce( + std::vector& tensors, + const ReduceOptions& opts = ReduceOptions()) override; + + c10::intrusive_ptr _reduce_oop( + at::Tensor& outputTensors, + at::Tensor& inputTensors, + const ReduceOptions& opts = ReduceOptions()); + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr _allgather_base( + at::Tensor& outputbuffer, + at::Tensor& inputbuffer, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_coalesced( + std::vector>& outputTensorLists, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputs, + 
std::vector& inputs, + const AllgatherOptions& opts = AllgatherOptions()) override; + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr _reduce_scatter_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputs, + std::vector& inputs, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override; + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override; + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputTensor, + at::Tensor& inputTensor, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr alltoall( + std::vector& outputTensors, + std::vector& inputTensors, + const AllToAllOptions& opts = AllToAllOptions()) override; + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override; + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override; + + void groupStart(); + + void groupEnd(); + + void groupEndNonblocking(std::shared_ptr comm); + + c10::intrusive_ptr gather( + std::vector>& outputTensors, + std::vector& inputTensors, + const GatherOptions& opts = GatherOptions()) override; + + c10::intrusive_ptr scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ScatterOptions& opts = ScatterOptions()) override; + + // Unsupported Ops + c10::intrusive_ptr recvAnysource( + std::vector& tensors, + int tag) override; + + // Agrees on an initial sequence number for the whole group by having rank 0 + // create it and broadcast it to other ranks using the store. 
+ void setSequenceNumberForGroup() override; + + // Retrieves the current sequence number for the whole group, which should be + // in sync. If the returned number is not consistent across the group, it + // may indicate that there is some sort of collective desynchronization. + uint64_t getSequenceNumberForGroup() override; + + // Return the total number of splits the communicators held by this process + // group have performed. Counts ncclCommCreateFromRanks() for ncclx v2.21.5+ + uint64_t getCommSplitCounter() const; + + void registerOnCompletionHook( + std::function)>&& hook) override; + void waitForPendingWorks() override; + + void enableCollectivesTiming() override; + + // Helper function for iteratively aborting communicators in the provided map + void abortCommsFromMap( + std::unordered_map>& ncclCommsMap, + std::optional abortReason); + + c10::intrusive_ptr initIntraNodeComm(); + + // Provides an API to abort the ProcessGroup (similar to ncclCommAbort) + // instead of relying on ProcessGroupNCCL destructor. + // return true if abort is successful, otherwise false + bool abort(std::optional abortReason = std::nullopt); + + void shutdown(std::optional reason = std::nullopt); + + void eagerConnectSingleDevice(at::Device device) override; + + void performNocolorSplit(at::Device device); + + // This method adds a temporary extension for the timeout period, + // applying to all collectives between the calling of this API and + // the completion of the first collective on the GPU. While this feature + // provides flexibility in specific scenarios, it introduces statefulness + // to timeout setting. Therefore, it is advisable to use this API sparingly + // and consider alternative approaches, such as directly setting the timeout + // or utilizing a barrier collective (one can set any timeout to the barrier), + // whenever feasible. 
+ void addEphemeralTimeout(const std::chrono::milliseconds& timeout); + + // This function is only intended for testing purposes because we don't + // want to expose the `WorkNCCL` via pybind. It verifies whether the + // `opTimeout_` of the provided WorkNCCL instance is the same as the specified + // timeout. + bool verifyWorkTimeoutForTest( + const c10::intrusive_ptr work, + const std::chrono::milliseconds& timeout); + + protected: + // Helper that broadcasts nccl unique ID to all ranks through the store + void broadcastUniqueNCCLID( + ncclUniqueId* ncclID, + bool isSingleP2POp, + const std::string& devicesKey, + int p2pRank); + + // Helper that either looks up the cached NCCL communicators or creates + // a new set of NCCL communicators as a cache entry + std::shared_ptr getNCCLComm( + const std::string& deviceKey, + at::Device& device, + OpType opType, + int p2pRank = 0, + bool isSendRecvSelf = false); + + // Wrapper method which can be overridden for tests. + virtual std::exception_ptr checkForNCCLErrors( + std::shared_ptr& ncclComm); + + // Ensure thaht if record is True, the work obj will be enqueued via + // workEnqueue + virtual c10::intrusive_ptr initWork( + at::Device& device, + int rank, + OpType opType, + const char* profilingTitle = nullptr, + const std::vector& inputs = {}, + const std::vector& outputs = {}, + bool record = false); + + // In the timeout case and we will dump debug info such as the NCCL flight + // recorder to storage. Down the road, if we have more complicated or blocking + // operations, we might need to use a side thread to do it. + bool dumpDebuggingInfo(); + + private: + int globalRankStart; + int globalRankStride; + + // Helper that encapsulates work shared across all collective communication + // primitives. 
The callbacks have the following signatures: + // + // ncclResult_t fn(at::Tensor& input, at::Tensor& output, + // ncclComm_t, at::cuda::CUDAStream&); + // void {pre,post}(std::vector); + template + c10::intrusive_ptr collective( + at::Tensor& input, + at::Tensor& output, + Fn fn, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false, + bool nanCheck = true); + + template + c10::intrusive_ptr collective( + at::Tensor& input, + at::Tensor& output, + Fn fn, + PreProcess pre, + PostProcess post, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false, + bool nanCheck = true); + + template + c10::intrusive_ptr collective( + std::vector& inputs, + std::vector& outputs, + Fn fn, + PreProcess pre, + PostProcess post, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false, + bool nanCheck = true); + + template + c10::intrusive_ptr collectiveCoalesced( + std::vector& input, + std::vector& output, + Fn fn, + OpType opType, + const char* profilingTitle = nullptr, + bool avoidRecordStreams = false); + + // Helper that encapsulates work shared across point-to-point communication + // primitives. It is the same structure as the helper used for collective + // communication primitives. + template + c10::intrusive_ptr pointToPoint( + at::Tensor& tensor, + Fn fn, + int peer, + OpType opType, + const char* profilingTitle = nullptr); + + template + c10::intrusive_ptr pointToPoint( + at::Tensor& tensor, + Fn fn, + int peer, + OpType opType, + PreProcess pre, + PostProcess post, + const char* profilingTitle); + + c10::intrusive_ptr allreduce_impl( + at::Tensor& tensor, + const AllreduceOptions& opts = AllreduceOptions()); + + // Checks for NCCL errors on each of the communicators and returns an + // appropriate exception_ptr (nullptr if no errors). 
+ static std::exception_ptr checkForNCCLErrorsInternal( + std::shared_ptr& ncclComm); + + // Function that runs as part of a separate thread and checks for errors on + // NCCL communicators. We need a separate thread to check for NCCL errors + // since we can't rely on the user calling certain methods like wait(), + // isCompleted() etc. to detect and remediate errors. In addition to this, we + // need a mechanism to safely abort and remove NCCL communicators from our + // cache. This can be done cleanly by having a thread for the ProcessGroupNCCL + // class. Attempting to modify the communicator cache from the WorkNCCL class + // might run into issues with object lifetime since the ProcessGroupNCCL + // object might get destroyed before the WorkNCCL object. + void ncclCommWatchdog(); + + // Return the CUDA device most likely associated with this backend. + // If we aren't bound to a specific device, there is no strict + // guarantee that this heuristic is the correct assignment of ranks + // to GPUs that Python layers use, but in practice it tends to be. + // Fortunately we don't rely on this for correctness of any tensor + // operations, just for ancillary uses like barriers. + at::Device guessDeviceForRank() const; + + // Destroys initialized NCCL communicators in devNCCLComMap_ given by input + // key. Throws if there are no communicators to destroy. Also removes + // communicators from the cache and clears used device indices. + void destroyNCCLComms(const std::string& devNCCLCommMapKey); + + // Watchdog's inside loop. + // Takes care of cleaning up completed work, and aborting upon failure or + // timeout. 
+ void watchdogHandler(); + + void runHookLoop(); + + // Desync debug helper + void logWorkStart(WorkNCCL& work); + + // Desync debug helper + void logWorkEnd(WorkNCCL& work); + + // Generates a prefix that is unique to this process group and rank, for + // disambiguating logs + std::string createLogPrefix() const; + + // Returns the unique prefix created in createLogPrefix + const std::string& logPrefix() const; + + // Returns the global rank of the device. This function assumes that users + // always create a default global process group(PG) which includes all + // devices. It is called in the constructor of ProcessGroupNCCL, so it always + // return the rank_ of the the very first PG created, aka, default global PG. + const int& globalRank() const; + + // Returns the global ranks of a PG. + const std::vector& groupRanks() const; + + // Util function to assign timeout to each work. + void assignTimeoutToWork( + const c10::intrusive_ptr& work, + const c10::intrusive_ptr& option); + + protected: + // Function that runs as part of a separate thread aside from watchdog + // thread because we need to check the heartbeat from watchdog thread + // so that when we get stuck in some NCCL/CUDA calls, + // we can dump the debugging information and abort the process. + virtual void heartbeatMonitor(); + + // Function that directly trigger std::abort so that the whole process + // gets terminated. + virtual void terminateProcess(std::string errMsg); + + // A helper function to wait for a future to complete or timeout. + void waitForFutureOrTimeout( + std::future& fut, + const std::chrono::milliseconds& timeOutMilSec, + const std::string& futDescription, + bool throwException = false, + bool log = false); + + // When watchdog timeout, this function will be called and return debug info + // for users. For now we only get information from retrieveDesyncReport. + // We are working on enabling more useful debug information for watchdog + // timeout. 
+ virtual std::string getNCCLWatchdogDebugInfo(); + + std::string getNCCLWatchdogTimeoutErrorMsg(const std::string& extraMsg); + + std::string getNCCLWatchdogTimeoutExitMsg(const std::string& exitReason); + + static const int64_t kWatchdogThreadSleepMillis; + + // The store is used to broadcast the NCCL unique ID of rank 0. This store + // comes with prefix and it is different across ProcessGroup NCCL instances + // (aka, different ProcessGroups). + c10::intrusive_ptr store_; + + // Reference to the store without prefix so that keys are same across all + // ProcessGroup NCCL instances and (key, value) pairs written to the store are + // global. + c10::intrusive_ptr globalStore_; + + bool storeError_{false}; + + // The lock which protects the write/read of + // ephemeralTimeoutActive_/ephemeralTimeoutInflight_. + // TODO(fduwjj): We need to have an audit on all mutexes we are adding here. + // And consolidate them if possible. + std::mutex mtxTimeoutExtension_; + + // The ephemeral timeout added on top of existing timeout for works issued + // before first work finishes. + std::chrono::milliseconds ephemeralTimeoutActive_ = + std::chrono::milliseconds(0); + + // The ephemeral timeout addition which has been already applied to work. + std::chrono::milliseconds ephemeralTimeoutInflight_ = + std::chrono::milliseconds(0); + + const c10::intrusive_ptr options_; + + // The number of NCCL communicators that have been created during + // the lifetime of this process group. This sequence number is + // used to scope keys used in the store. + uint64_t ncclCommCounter_{0}; + + // The store keys to trace the last NCCL collective kernel CUDA events - start + // event and end event respectively. These are used to do desync root cause + // analysis. + const std::string traceKeyStart_; + const std::string traceKeyEnd_; + + // The NCCL communicator that the process group has cached. 
+ // + // For collective operations: + // The key is a list of GPU devices that an operation is operating on + // The GPU devices are stored in a device sequence and the cache NCCL + // communicator is associated with this GPU device sequence + // + // e.g. If the process group op only uses device 0, then the value of + // the used device string stored (value of the hashmap) would be "0". + // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 1, 2, 3, 4, 5, 6, 7 separately, + // then the value of the used device string (key) stored would be + // "0,1,2,3,4,5,6,7" + // + // If the process group op uses device 0 - 7 and the each tensor of the + // input tensor list is on device, 0, 4, 5, 6, 7, 1, 2, 3 separately, + // then the value of the used device string stored would be + // "0,4,5,6,7,1,2,3" + // + // Note that the order of the device for the tensor list matters. + // + // For point-to-point operations: + // The key is a string of my current rank and the peer process rank. + // e.g. If process 1 and process 2 are involved in a point-to-point + // communication, the key will be "1:2" on both processes. Note: this is for + // the scenario where there is only 1 GPU per process. When it comes to + // multiple GPUs per process, this part may need to redesigned. + // TODO: we probably need a separte map for P2P comms + std::unordered_map> devNCCLCommMap_; + + // The NCCL communicators currently in process of being initialized. + std::unordered_map> + inInitializationCommMap_; + + // Mutex to guard maps like devNCCLCommMap_. + std::mutex mutex_; + + // Heartbeat of watchdog thread. + std::atomic_uint64_t heartbeat_; + + // The time interval used for deciding whether there is no watchdog heartbeat. + int heartbeatTimeoutInSec_; + + // timeout for the dump to finish. 
+ int waitTimeoutDumpInMilSec_; + + // Interval of check coordinated signals in ProcessGroupNCCL from other ranks + // e.g., trigger the dump of the debugging info for timeout when notified. + int coordCheckIntervalMilSec_; + + // Size of ring buffer where we store NCCL Traces for debugging. + int ncclTraceBufferSize_; + + // We gate the heartbeat monitor thread so that we can roll it out gradually. + std::atomic monitorThreadEnabled_; + + // We gate the cudaEventCache so that we can roll it out gradually. + std::atomic cudaEventCacheEnabled_; + + // Monitor thread which checks the heartbeat of Watchdog thread. + // If the monitor thread finds there is no heartbeat, it will dump debug info + // and then kill the watchdog thread to avoid hang. + std::thread ncclHeartbeatMonitorThread_; + + // Watchdog thread which looks for errors on the cached NCCL communicators. + std::thread ncclCommWatchdogThread_; + + std::thread onCompletionHookThread_; + + // Whether or not we should terminate the watchdog and workCleanup threads. + std::atomic terminateProcessGroup_; + + // Whether or not we should terminate the heartbeat monitoring threads. + std::atomic terminateHeartbeatMonitorThread_; + + // Whether we are in the shutdown mode when we are trying to get debug info, + // such as desync report. + std::atomic collectiveDebugInfoMode_; + + // Whether there are hooks pending to be fired + std::atomic hasPendingHooks_; + + // This is the signal from watchdog threads to indicate whether the monitor + // thread should dump. Making it static so that it is accessiable from all the + // PGs. With this flag, monitor thread would dump debug info under any one of + // the three conditions: + // + // 1: watchdog thread of any PG detects a collective timeout. + // 2: timeout signal is received from other ranks through tcpstore. + // 3: current PG's watchdog heartbeat timeout occurs. 
+ // + // Note that only the monitor thread from PG0 will dump the debug info for + // case one and two so that the debug info is only dumped once. + static std::atomic shouldDump_; + + // Mutex to Guard workMetaList_ + std::mutex workMetaListMutex_; + + // Mutex to Guard monitorWakeUpCV_ + std::mutex monitorMutex_; + + bool writeDebugInfo_ = false; + + // Condition Variable for watchdog thread sleep + std::condition_variable workMetaListCV_; + + // Condition Variable for monitor thread to wake up early + std::condition_variable monitorWakeUpCV_; + + // Vector to Store WorkNCCL pointers + std::list workMetaList_; + + std::chrono::time_point lastWorkListUpdateTime_; + + // Mutex to Guard workMetaList_ + std::mutex completedWorkListMutex_; + + // Condition Variable for watchdog thread sleep + std::condition_variable completedWorkListCV_; + + std::list completedWorkList_; + + // Add Work Pointer to workVector + void workEnqueue(c10::intrusive_ptr); + + // The CUDA streams used by NCCL kernels + std::unordered_map ncclStreams_; + + // The CUDA events used to sync NCCL streams + std::unordered_map ncclEvents_; + + // Device Indexes used for all collectives in this group + std::set usedDeviceIdxs_; + + // Flag to denote if a coalescing groupStart/groupEnd block is active + int coalescing_state_ = 0; + + // Stores device indexes for all collectives run inside a coalescing block + at::Device coalescedDevice_ = at::Device("cuda"); + + // Stores communicators for all collectives run inside a coalescing block + std::shared_ptr coalescedComm_ = nullptr; + + // map from the key: "group name + pg counter (ID)" to the + // unique NCCL ID count. This needs to be group and pg specific + // + // For each process group, we need a uniform unique NCCL ID counter to ensure + // that NCCL operation in this process group can be completed successfully. + // Since each process group ID belongs to a group name, the key to this map + // is a combination of group name and ProcessGroupNCCL ID. 
+ static std::unordered_map pgUniqueNCCLIDCnt_; + + // map from group name to the pg counter (ID) within that group + // + // For each group with the "group name" (which is the key), we need to + // keep track of a unique process group ID when creating a new + // ProcessGroupNCCL for this "group name". Therefore, the value of this + // map keeps the unique ProcessGroupNCCL's ID for a specific group with + // the "group name". The reason we need a per-group process group ID counter + // is that different group can have different ranks and we need ensure that + // each group has its own uniform process group ID for all its ranks. + static std::unordered_map processGroupCounterMap_; + + // Whether or not wait() and synchronize() are blocking operations that wait + // for the operation to complete. + bool blockingWait_ = false; + + // Whether or not to hook the cache allocator to register all allocated + // tensors + bool useTensorRegisterAllocatorHook_ = false; + + // Whether or not the workCleanupThread is used to perform async error + // handling. + ErrorHandlingMode asyncErrorHandling_ = NoHandling; + + // Whether or not to enable timeout root cause analysis. + bool desyncDebug_; + + // Whether or not to dump debug info on exception including both watchdog + // timeout and nccl errors. + bool dumpOnTimeoutOrEx_; + + // Whether or not to enable nan check for input tensors to collectives. + bool enableNanCheck_; + + // Whether or not to print C++ stack traces to logs on unclean shutdown. + bool logCppStackOnUncleanShutdown_; + + // Whether or not to create start CUDAEvent and enable timing for start + // and end events. Note that enableTiming_ is always true if desyncDebug_ + // is set to true. + std::atomic enableTiming_; + + // Flag to enable the print of hash value of input/output of collectives for + // verification. 
+ std::atomic enableCollecticeHashDebug_; + + // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set + bool avoidRecordStreams_ = false; + + // Whether the NCCL watchdog should rethrow CUDA errors. + bool rethrowCUDAErrors_ = false; + + // Set of communicators that this process group has aborted and their + // ncclUniqueId has been written to the store. We don't need a lock + // for this map since only the watchdog thread accesses this set. The + // set contains the string representation of ncclUniqueId. + std::unordered_set abortedComms_; + + // The number of active ncclGroupStart() calls. This counter will be increased + // by 1 when ncclGroupStart() is called and decreased by 1 when ncclGroupEnd() + // is called. + static thread_local uint64_t ncclActiveGroupCounter_; + + // Counting for the sequential number of NCCL collective call. + // (specifically, how many actual kernels we launched, which differs from + // op_id_ when coalescing is enabled) + uint64_t seqCollective_{0}; + + // Counting for the sequential number of NCCL P2P calls. + uint64_t seqP2P_{0}; + + // Incrementing counter for logical operations (collective or p2p) issued on + // the ProcessGroup + uint64_t op_id_{0}; + + std::exception_ptr watchDogException_ = nullptr; + + // The number of ProcessGroupNCCL created on the current rank. + size_t local_id_; + + std::string logPrefix_; + + c10::intrusive_ptr intraNodeComm_; + + // Number of devices on this node. + int localDeviceCount_{0}; + + std::shared_ptr pgStatus_ = + std::make_shared(); +}; + +// Dumps the NCCL comm traces and additional information about the Process +// Group. +TORCH_API std::string dump_nccl_trace( + bool includeCollectives, + bool includeStackTraces, + bool onlyActive); + +// Dumps the NCCL comm traces and additional information about the Process +// Group in JSON formatted string. +// We don't include stack traces in JSON format as it is far too much data. 
+TORCH_API std::string dump_nccl_trace_json( + bool includeCollectives, + bool onlyActive); + +// Gets a mutable reference to a global optional function.Heartbeat Monitor +// will use this function to dump traces, if available. Inside fbcode, we +// store a function here that uses an internal tool for process tracing +TORCH_API std::optional< + std::function)>>& +get_cpp_trace_dumper(); + +// Similar to get_cpp_trace_dumper, this stores a function defined in +// torch-python layer that lets us check whether the GIL can be acquired, +// helpful for instrumenting in cases where a hang was observed. +typedef bool (*gil_checker_t)(); + +TORCH_API gil_checker_t& get_gil_checker(); +} // namespace c10d + +#endif // USE_C10D_NCCL diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp new file mode 100644 index 0000000000000000000000000000000000000000..265c78f1b78cf9a59bd8400581e71912969c2890 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp @@ -0,0 +1,249 @@ +#pragma once + +#include +#include +#include + +namespace c10d { + +// PyProcessGroup is a pybind11 trampoline class to allow a Python +// class to inherit from torch.distributed.ProcessGroup +class PyProcessGroup : public ProcessGroup { + public: + // PyWork is a pybind11 trampoline class to allow a Python + // class to inherit from torch.distributed.Work + class TORCH_PYTHON_API PyWork : public Work { + public: + PyWork() = default; + + bool wait(std::chrono::milliseconds timeout = kNoTimeout) override { + PYBIND11_OVERRIDE( + bool, /* Return type */ + Work, /* Parent class */ + wait, /* Name of function in C++ */ + timeout); + } + + c10::intrusive_ptr getFuture() override { + // We cannot use PYBIND11_OVERRIDE because: + // 1. 
We have to >MANUALLY< unwrap the PyFutureWrapper and + // 2. The python name is get_future + pybind11::gil_scoped_acquire gil; + auto override = + pybind11::get_override(static_cast(this), "get_future"); + + if (override) { + py::object o = override(); + auto futWrapper = + o.cast>(); + return futWrapper->fut; + } + + return Work::getFuture(); + } + + // Take a reference of the corresponding py::object. + // With functional collectives, ownership of work objects is generally + // transferred to C++. For pure C++ work objects, it is sufficient to + // transfer the ownership of work object. For user-defined work objects in + // Python, it is necessary to keep the corresponding py::object alive in + // addition to ensure that the user-defined methods can be executed. + void ref_py_object() { + py_obj_ = py::cast(this); + } + + private: + py::object py_obj_; + }; + + using ProcessGroup::ProcessGroup; + + const std::string getBackendName() const override { + PYBIND11_OVERRIDE_PURE( + std::string, /* Return type */ + ProcessGroup, /* Parent class */ + getBackendName, /* Name of function in C++ */ + ); + } + + c10::intrusive_ptr allgather( + std::vector>& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allgather, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr allgather_into_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const AllgatherOptions& opts = AllgatherOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allgather_into_tensor_coalesced, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr allreduce( + std::vector& tensors, + const AllreduceOptions& opts = AllreduceOptions()) override { + PYBIND11_OVERRIDE( + 
c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allreduce, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr allreduce_coalesced( + std::vector& tensors, + const AllreduceCoalescedOptions& opts = + AllreduceCoalescedOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + allreduce_coalesced, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr alltoall_base( + at::Tensor& outputBuffer, + at::Tensor& inputBuffer, + std::vector& outputSplitSizes, + std::vector& inputSplitSizes, + const AllToAllOptions& opts = AllToAllOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + alltoall_base, /* Name of function in C++ */ + outputBuffer, + inputBuffer, + outputSplitSizes, + inputSplitSizes, + opts); + } + + c10::intrusive_ptr barrier( + const BarrierOptions& opts = BarrierOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + barrier, /* Name of function in C++ */ + opts); + } + + c10::intrusive_ptr broadcast( + std::vector& tensors, + const BroadcastOptions& opts = BroadcastOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + broadcast, /* Name of function in C++ */ + tensors, + opts); + } + + c10::intrusive_ptr reduce_scatter( + std::vector& outputTensors, + std::vector>& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + reduce_scatter, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr reduce_scatter_tensor_coalesced( + std::vector& outputTensors, + std::vector& inputTensors, + const ReduceScatterOptions& opts = ReduceScatterOptions()) override { + 
PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + reduce_scatter_tensor_coalesced, /* Name of function in C++ */ + outputTensors, + inputTensors, + opts); + } + + c10::intrusive_ptr send( + std::vector& tensors, + int dstRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + send, /* Name of function in C++ */ + tensors, + dstRank, + tag); + } + + c10::intrusive_ptr recv( + std::vector& tensors, + int srcRank, + int tag) override { + PYBIND11_OVERRIDE( + c10::intrusive_ptr, /* Return type */ + ProcessGroup, /* Parent class */ + recv, /* Name of function in C++ */ + tensors, + srcRank, + tag); + } +}; + +class TORCH_PYTHON_API PythonOnCompletionHook { + public: + // Wraps a py::object hook and acquires Python GIL in dtor before + // destructing the hook object. + PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {} + + ~PythonOnCompletionHook() { + py::gil_scoped_acquire ag; + hook_.dec_ref(); + // Explicitly set hook_ to nullptr to prevent py::object's dtor + // to decref on the PyObject again. + // See Note [Destructing py::object] in python_ivalue.h + hook_.ptr() = nullptr; + } + + void operator()(const std::shared_ptr& workInfo) const { + std::exception_ptr eptr; + { + py::gil_scoped_acquire acquire; + try { + hook_(workInfo); + } catch (py::error_already_set& e) { + // py::error_already_set requires GIL to destruct, take + // special care. + eptr = std::make_exception_ptr(std::runtime_error(e.what())); + e.restore(); + PyErr_Clear(); + } catch (std::exception& e) { + eptr = std::current_exception(); + } + } + // No more Python-related stuff at this point, i.e., this + // exception can be captured and handled by PG backend. 
+ if (eptr) + std::rethrow_exception(eptr); + } + + private: + py::object hook_; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d18de830ff7f33b6410bcc91e8b3c567995aa904 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Store.hpp @@ -0,0 +1,128 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace c10d { + +// callback function will be given arguments (std::optional oldValue, +// std::optional newValue) +using WatchKeyCallback = + std::function, std::optional)>; + +class TORCH_API Store : public torch::CustomClassHolder { + public: + static constexpr std::chrono::milliseconds kDefaultTimeout = + std::chrono::seconds(300); + static constexpr std::chrono::milliseconds kNoTimeout = + std::chrono::milliseconds::zero(); + + Store() : timeout_(kDefaultTimeout) {} + + explicit Store(const std::chrono::milliseconds& timeout) + : timeout_(timeout) {} + + Store(const Store&) = default; + Store(Store&&) noexcept = default; + + ~Store() override = default; + + void set(const std::string& key, const std::string& value); + + virtual void set( + const std::string& key, + const std::vector& value) = 0; + + std::string compareSet( + const std::string& key, + const std::string& currentValue, + const std::string& newValue); + + virtual std::vector compareSet( + const std::string& key, + const std::vector& currentValue, + const std::vector& newValue) { + TORCH_INTERNAL_ASSERT(false, "Not implemented."); + } + + std::string get_to_str(const std::string& key); + + virtual std::vector get(const std::string& key) = 0; + + virtual int64_t add(const std::string& key, int64_t value) = 0; + + virtual bool deleteKey(const std::string& key) = 0; + + virtual bool 
check(const std::vector& keys) = 0; + + virtual int64_t getNumKeys() = 0; + + virtual void wait(const std::vector& keys) = 0; + + virtual void wait( + const std::vector& keys, + const std::chrono::milliseconds& timeout) = 0; + + virtual const std::chrono::milliseconds& getTimeout() const noexcept; + + virtual void setTimeout(const std::chrono::milliseconds& timeout); + + // watchKey() is deprecated and no longer supported. + virtual void watchKey( + const std::string& /* unused */, + WatchKeyCallback /* unused */) { + TORCH_CHECK(false, "watchKey is deprecated, no implementation support it."); + } + + virtual void append( + const std::string& key, + const std::vector& value); + + virtual std::vector> multiGet( + const std::vector& keys); + + virtual void multiSet( + const std::vector& keys, + const std::vector>& values); + + // Returns true if this store support append, multiGet and multiSet + virtual bool hasExtendedApi() const; + + protected: + std::chrono::milliseconds timeout_; +}; + +/* +StoreTimeoutGuard is a RAII guard that will set the store timeout and restore it +when it returns. 
+*/ +class StoreTimeoutGuard { + public: + explicit StoreTimeoutGuard( + Store& store, + const std::chrono::milliseconds& timeout) + : store_(store), oldTimeout_(store.getTimeout()) { + store.setTimeout(timeout); + } + + ~StoreTimeoutGuard() { + store_.setTimeout(oldTimeout_); + } + + /* Disabling copy and move semantics */ + StoreTimeoutGuard(const StoreTimeoutGuard&) = delete; + StoreTimeoutGuard& operator=(const StoreTimeoutGuard&) = delete; + StoreTimeoutGuard(StoreTimeoutGuard&&) = delete; + StoreTimeoutGuard& operator=(StoreTimeoutGuard&&) = delete; + + private: + Store& store_; + std::chrono::milliseconds oldTimeout_{}; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp new file mode 100644 index 0000000000000000000000000000000000000000..953cec8a1bc36e8550b026e49ef4d2b4fab76e75 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UCCTracing.hpp @@ -0,0 +1,58 @@ +#pragma once + +#ifdef USE_C10D_UCC + +#include + +namespace c10d { + +#define RECORD_COMMS_TRACE( \ + _comms_tracer, _work, _opType, _rank, _comm_size, _inTensors, _outTensors) \ + do { \ + if (torch_ucc_config.enable_comms_logger) { \ + _comms_tracer->recordComms( \ + opTypeToString(_opType), \ + (uintptr_t)_work.get(), \ + _rank, \ + _comm_size, \ + _inTensors, \ + _outTensors); \ + } \ + } while (0) + +// interfaces to collect communication traces +class TORCH_API CommTraceLogger : public torch::CustomClassHolder { + private: + std::vector comms_trace_; + std::vector curBlocks_; /* unused */ + std::vector curOutSplitSizes_; + std::vector curInSplitSizes_; + int curRoot_ = -1; + unsigned long seqnum = 0; + + public: + void setCurBlock(const std::string& name); /* unused */ + void popBlock(); /* unused */ + // record root info if applicable, e.g., broadcast, gather, 
scatter + void recordOptionalInfo(int root = -1); + // record input/output splits of Alltoallv + void recordOptionalInfo( + const std::vector& outputSplitSizes = {}, + const std::vector& inputSplitSizes = {}); + // record essential comms information + void recordComms( + const std::string& collName, + const uintptr_t workReq = 0, + const int rank = -1, + const int world_size = -1, + const std::vector& inputTensors = {}, + const std::vector& outputTensor = {}); + // return collected comms traces + std::vector& getCommsTrace() { + return comms_trace_; + } +}; + +} // namespace c10d + +#endif // USE_C10D_UCC diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..531f8459aa7ad4739448040dde4eda5769079f90 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace c10d::tcputil { + +#define CONNECT_SOCKET_OFFSET 2 + +inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) { + return ::poll(fds, nfds, timeout); +} + +inline void addPollfd( + std::vector& fds, + int socket, + short events) { + fds.push_back({.fd = socket, .events = events}); +} + +inline struct ::pollfd getPollfd(int socket, short events) { + struct ::pollfd res = {.fd = socket, .events = events}; + return res; +} + +} // namespace c10d::tcputil diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ea4a4653bc35fc2fc28852f8dcf20309bb902dd7 --- /dev/null +++ 
b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Utils.hpp @@ -0,0 +1,729 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#include +#include +typedef SSIZE_T ssize_t; +#pragma comment(lib, "Ws2_32.lib") +#else +#include +#include +#include +#include +#include +#endif + +#include + +#include +#include +#include +#include +#include + +namespace c10d { + +TORCH_API size_t getTensorsNumel(const std::vector& tensors); + +// Retrieve tensor shapes from a given tensor. +TORCH_API std::vector getTensorShapes( + const std::vector& tensors); + +// Use -2 to represent unset state of env vars +#define C10D_ENV_NOT_SET -2 + +#define WARN_ENV_VAR_ONCE(deprecated_env, new_env) \ + TORCH_WARN_ONCE( \ + "Environment variable " + deprecated_env + " is deprecated; use " + \ + new_env + " instead"); + +// Turns at::IntArrayRef into "(1, 2, 3, 4)". +inline std::string toString(at::IntArrayRef l) { + std::stringstream ss; + ss << "("; + for (const auto i : c10::irange(l.size())) { + if (i > 0) { + ss << ", "; + } + ss << l[i]; + } + ss << ")"; + return ss.str(); +} + +inline std::string toString(const c10::Layout& layout) { + std::stringstream ss; + ss << layout; + return ss.str(); +} + +inline void assertSameType( + const at::DeprecatedTypeProperties& type, + const std::vector& tensors) { + for (const auto i : c10::irange(tensors.size())) { + if (!tensors[i].options().type_equal(type.options())) { + const std::string expected = type.toString(); + const std::string actual = tensors[i].toString(); + throw std::invalid_argument( + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) + "mixed types (" + expected + " and " + actual + ")"); + } + } +} + +inline std::vector split( + char separator, + const std::string& string) { + std::vector pieces; + std::stringstream ss(string); + std::string item; + while (std::getline(ss, item, separator)) { + pieces.push_back(std::move(item)); + } + return pieces; +} + 
+inline std::string getCvarString( + const std::vector& env, + const char* def) { + const char* ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (ssize_t i = static_cast(env.size()) - 1; i >= 0; i--) { + const char* val = std::getenv(env[i].c_str()); + if (val == nullptr) { + continue; + } else if (i) { + WARN_ENV_VAR_ONCE(env[i], env[0]); + } + + ret = val; + } + + return ret; +} + +inline int getCvarInt(const std::vector& env, int def) { + int ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (ssize_t i = static_cast(env.size()) - 1; i >= 0; i--) { + char* val = std::getenv(env[i].c_str()); + if (val == nullptr) { + continue; + } else if (i) { + WARN_ENV_VAR_ONCE(env[i], env[0]); + } + + try { + ret = std::stoi(val); + } catch (std::exception&) { + TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]); + } + } + + return ret; +} + +inline bool getCvarBool(const std::vector& env, bool def) { + bool ret = def; + + if (env.empty()) { + TORCH_CHECK(false, "No environment variables passed"); + return ret; + } + + /* parse environment variable in reverse order, so the early + * versions of a variable get higher priority than the latter + * versions of the same variable */ + for (ssize_t i = static_cast(env.size()) - 1; i >= 0; i--) { + char* val_ = std::getenv(env[i].c_str()); + if (val_ == nullptr) { + continue; + } else if (i) { + WARN_ENV_VAR_ONCE(env[i], env[0]); + } + + std::string val = std::string(val_); + for (auto& x : val) { + // NOLINTNEXTLINE(*-narrowing-conversions) + x = 
std::tolower(x); + } + + if (val == "y" || val == "yes" || val == "1" || val == "t" || + val == "true") { + ret = true; + } else if ( + val == "n" || val == "no" || val == "0" || val == "f" || + val == "false") { + ret = false; + } else { + TORCH_CHECK(false, "Invalid value for environment variable: " + env[i]); + return ret; + } + } + + return ret; +} + +inline void assertSameSizes( + const at::IntArrayRef& sizes, + const std::vector& tensors) { + for (const auto i : c10::irange(tensors.size())) { + if (!tensors[i].sizes().equals(sizes)) { + const auto expected = toString(sizes); + const auto actual = toString(tensors[i].sizes()); + throw std::invalid_argument( + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) + "mixed sizes (" + expected + " and " + actual + ")"); + } + } +} + +inline void assertSameSizeAndType(const std::vector& tensors) { + // Ensure we have at least one tensor + if (tensors.empty()) { + throw std::invalid_argument("argument is empty"); + } + + // Ensure all tensors have identical type and shape + auto options = tensors[0].options(); + auto sizes = tensors[0].sizes(); + for (const auto i : c10::irange(1, tensors.size())) { + if (!tensors[i].options().type_equal(options)) { + const auto expected = toString(options); + const auto actual = toString(tensors[i].options()); + throw std::invalid_argument( + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) + "argument contains mixed types (" + expected + " and " + actual + + ")"); + } + if (!tensors[i].sizes().equals(sizes)) { + const auto expected = toString(sizes); + const auto actual = toString(tensors[i].sizes()); + throw std::invalid_argument( + // NOLINTNEXTLINE(performance-inefficient-string-concatenation) + "argument contains mixed types (" + expected + " and " + actual + + ")"); + } + } +} + +inline void assertTypeMatch( + const std::function& fn, + const at::DeprecatedTypeProperties& type, + const at::ArrayRef tensors, + size_t index) { + if 
(!tensors[index].options().type_equal(type.options())) { + fn("invalid tensor type at index " + std::to_string(index) + " (expected " + + type.toString() + ", got " + tensors[index].toString() + ")"); + } +} + +inline void assertTypeMatch( + const std::function& fn, + const at::TensorOptions& options, + const at::ArrayRef tensors, + size_t index) { + if (!tensors[index].options().type_equal(options)) { + fn("invalid tensor type at index " + std::to_string(index) + " (expected " + + toString(options) + ", got " + toString(tensors[index].options()) + ")"); + } +} + +inline void assertSizesMatch( + const std::function& fn, + const at::IntArrayRef& sizes, + const at::ArrayRef tensors, + size_t index) { + if (tensors[index].sizes() != sizes) { + fn("invalid tensor size at index " + std::to_string(index) + " (expected " + + toString(sizes) + ", got " + toString(tensors[index].sizes()) + ")"); + } +} + +inline void assertLayoutMatch( + const std::function& fn, + const c10::Layout& expected, + const at::ArrayRef tensors, + size_t index) { + const auto& actual = tensors[index].layout(); + if (actual != expected) { + fn("invalid tensor layout at index " + std::to_string(index) + + " (expected " + toString(expected) + ", got " + toString(actual) + ")"); + } +} + +inline void assertLayoutMatch( + const std::function& fn, + const at::ArrayRef tensors) { + const auto& layout = tensors[0].layout(); + for (const auto i : c10::irange(1, tensors.size())) { + assertLayoutMatch(fn, layout, tensors, i); + } +} + +inline void assertNonEmpty( + const std::function& fn, + const at::ArrayRef tensors) { + if (tensors.empty()) { + fn("requires non-empty tensor list"); + } +} + +inline void assertSingleElement( + const std::function& fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires a single-element tensor list"); + } +} + +inline void assertSingleElementInput( + const std::function& fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires 
a single-element input tensor list"); + } +} + +inline void assertSingleElementOutput( + const std::function& fn, + const at::ArrayRef tensors) { + if (tensors.size() != 1) { + fn("requires a single-element output tensor list"); + } +} + +inline void assertRootRank( + const std::function& fn, + int64_t rank, + int64_t size) { + if (rank < 0 || rank >= size) { + fn("invalid root rank: " + std::to_string(rank)); + } +} + +inline void assertRootTensor( + const std::function& fn, + int64_t rank, + int64_t size) { + if (rank < 0 || rank >= size) { + fn("invalid root tensor: " + std::to_string(rank)); + } +} + +inline void assertDense( + const std::function& fn, + const at::ArrayRef tensors) { + const auto& layout = tensors[0].layout(); + if (layout != at::kStrided) { + fn("only supports dense tensors"); + } +} + +inline void assertCPU( + const std::function& fn, + const at::ArrayRef tensors) { + const auto& device = tensors[0].device(); + if (device.type() != at::kCPU) { + fn("only supports CPU tensors"); + } +} + +inline void assertSameDevice( + const std::function& fn, + const at::ArrayRef tensors) { + if (tensors.size() < 2) { + return; + } + const auto& device = tensors[0].device(); + for (const auto i : c10::irange(1, tensors.size())) { + if (tensors[i].device() != device) { + fn("tensors should be on the same device"); + } + } +} + +inline void assertTypeAndSizesMatch( + const std::function& fn, + const at::ArrayRef tensors, + const at::DeprecatedTypeProperties& type, + const at::IntArrayRef& sizes) { + for (const auto i : c10::irange(tensors.size())) { + assertTypeMatch(fn, type, tensors, i); + assertSizesMatch(fn, sizes, tensors, i); + } +} + +inline void assertTypeAndSizesMatch( + const std::function& fn, + const at::ArrayRef tensors, + const at::TensorOptions& options, + const at::IntArrayRef& sizes) { + for (const auto i : c10::irange(tensors.size())) { + assertTypeMatch(fn, options, tensors, i); + assertSizesMatch(fn, sizes, tensors, i); + } +} + +inline 
void assertTypeAndSizesMatch( + const std::function& fn, + const at::ArrayRef tensors) { + const auto& options = tensors[0].options(); + const auto sizes = tensors[0].sizes(); + assertTypeAndSizesMatch(fn, tensors.slice(1), options, sizes); +} + +// Copied from ATen/core/functional.h. +template +inline auto fmap(T& inputs, const F& fn) + -> std::vector { + std::vector r; + r.reserve(inputs.size()); + for (auto& input : inputs) { + r.push_back(fn(input)); + } + return r; +} + +// Copied from torch/csrc/utils/tensor_flatten.h. +inline at::Tensor flattenDenseTensors(at::TensorList tensors) { + static const auto flatten = [](const at::Tensor& t) { + return t.contiguous().view({-1}); + }; + if (tensors.size() == 1) { + return flatten(tensors[0]); + } + return at::cat(::c10d::fmap(tensors, flatten)); +} + +inline at::Tensor newLikeFlat( + std::vector>& tensors, + size_t deviceIdx) { + if (tensors.empty() || tensors[0].empty()) { + TORCH_CHECK(false, "Received an empty list"); + } + if (deviceIdx >= tensors.size()) { + TORCH_CHECK(false, "Invalid device index"); + } + auto& t = tensors[deviceIdx][0]; + auto device = t.device(); + for (const auto i : c10::irange(1, tensors[deviceIdx].size())) { + if (tensors[deviceIdx][i].device() != device) { + TORCH_CHECK(false, "Expecting all tensors on the same device"); + } + } + at::DeviceGuard gpuGuard(device); + std::vector sizes{static_cast(tensors[deviceIdx].size())}; + std::vector strides{static_cast(t.numel())}; + sizes.insert(sizes.end(), t.sizes().begin(), t.sizes().end()); + strides.insert(strides.end(), t.strides().begin(), t.strides().end()); + return at::empty_strided( + sizes, strides, t.options().memory_format(std::nullopt)); +} + +inline at::Tensor newLikeFlat(std::vector& tensors) { + if (tensors.empty()) { + TORCH_CHECK(false, "Received an empty list"); + } + auto& t = tensors[0]; + at::DeviceGuard gpuGuard(t.device()); + std::vector sizes{static_cast(tensors.size())}; + sizes.insert(sizes.end(), t.sizes().begin(), 
t.sizes().end()); + return at::empty(sizes, t.options()); +} + +inline std::vector> getSizes( + const std::vector& tensors) { + std::vector> sizes(tensors.size()); + for (const auto i : c10::irange(tensors.size())) { + sizes[i] = tensors[i].sizes().vec(); + } + return sizes; +} + +inline std::vector getDevices(const std::vector& tensors) { + std::vector devices(tensors.size(), -1); + if (tensors[0].device().is_cuda()) { + for (const auto i : c10::irange(tensors.size())) { + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + devices[i] = tensors[i].storage().device().index(); + } + } + return devices; +} + +template +inline T* getDataPointer(const at::Tensor& tensor) { + // This method is only used in ProcessGroupGloo for now. Call sites must make + // sure that the input tensor is contiguous. It is OK if the tensor does not + // start from the beginning of the storage. For example, it could come from + // chunk(..., dim=0)[1]. Hence, we need to use data_ptr() instead of + // tensor.storage().data() + // NB: not using tensor.data() because tensor is not aware of gloo::TYPE + return static_cast(tensor.data_ptr()); +} + +template +std::vector getDataPointers(const std::vector& tensors) { + std::vector ptrs(tensors.size()); + for (const auto i : c10::irange(tensors.size())) { + ptrs[i] = getDataPointer(tensors[i]); + } + return ptrs; +} + +// For alltoall split size sanity check +inline void checkSplitSizes( + const std::vector& split_sizes, + const at::Tensor& tensor, + int group_size) { + if (split_sizes.empty()) { + TORCH_CHECK( + tensor.size(0) % group_size == 0, + "Tensor's dim 0 does not divide equally across group size"); + } else { + TORCH_CHECK( + split_sizes.size() == static_cast(group_size), + "Number of tensor splits not equal to group size"); + const auto sum = c10::sum_integers(split_sizes); + TORCH_CHECK( + sum == tensor.size(0), "Split sizes doesn't match total dim 0 size"); + } +} + +// Compute alltoall lengths and offsets, handling multi-dimension 
tensors +template +size_t computeLengthsAndOffsets( + const std::vector& split_sizes, + const at::Tensor& tensor, + std::vector* lengths, + std::vector* offsets) { + size_t group_size = lengths->size(); + bool equal_splits = false; + size_t dim0_size = tensor.size(0); + size_t row_size = (dim0_size ? tensor.numel() / dim0_size : 1); + size_t split_size = 0; + size_t offset = 0; + + if (split_sizes.empty()) { + equal_splits = true; + split_size = tensor.size(0) / group_size; + } + for (const auto i : c10::irange(group_size)) { + size_t length = row_size * (equal_splits ? split_size : split_sizes[i]); + (*lengths)[i] = length; + (*offsets)[i] = offset; + // TODO: see if we should add overflow protection for offset + offset += length; + } + return offset; +} + +template +size_t computeLengthsAndOffsets( + const std::vector& tensors, + std::vector* lengths, + std::vector* offsets) { + size_t group_size = lengths->size(); + size_t offset = 0; + for (const auto i : c10::irange(group_size)) { + size_t length = tensors[i].numel(); + (*lengths)[i] = length; + (*offsets)[i] = offset; + offset += length; + } + return offset; +} + +using RankType = uint32_t; +using SizeType = uint64_t; + +// `errno` is only meaningful when it fails. E.g., a successful `fork()` sets +// `errno` to `EINVAL` in child process on some macos +// (https://stackoverflow.com/a/20295079), and thus `errno` should really only +// be inspected if an error occurred. +// +// `success_cond` is an expression used to check if an error has happend. So for +// `fork()`, we can use `SYSCHECK(pid = fork(), pid != -1)`. The function output +// is stored in variable `__output` and may be used in `success_cond`. 
+#ifdef _WIN32 +#define SYSCHECK(expr, success_cond) \ + while (true) { \ + auto __output = (expr); \ + auto errno_local = WSAGetLastError(); \ + (void)__output; \ + if (!(success_cond)) { \ + if (errno == EINTR) { \ + continue; \ + } else if ( \ + errno_local == WSAETIMEDOUT || errno_local == WSAEWOULDBLOCK) { \ + C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \ + } else { \ + C10_THROW_ERROR(DistNetworkError, std::strerror(errno_local)); \ + } \ + } else { \ + break; \ + } \ + } +#else +#define SYSCHECK(expr, success_cond) \ + while (true) { \ + auto __output = (expr); \ + (void)__output; \ + if (!(success_cond)) { \ + if (errno == EINTR) { \ + continue; \ + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { \ + C10_THROW_ERROR(DistNetworkError, "Socket Timeout"); \ + } else { \ + C10_THROW_ERROR(DistNetworkError, std::strerror(errno)); \ + } \ + } else { \ + break; \ + } \ + } +#endif + +// Most functions indicate error by returning `-1`. This is a helper macro for +// this common case with `SYSCHECK`. 
+// Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1 +#define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1) + +namespace tcputil { + +// Send and receive +template +void sendBytes( + int socket, + const T* buffer, + size_t length, + bool moreData = false) { + size_t bytesToSend = sizeof(T) * length; + if (bytesToSend == 0) { + return; + } + + auto currentBytes = reinterpret_cast(buffer); + + int flags = 0; + +#ifdef MSG_MORE + if (moreData) { // there is more data to send + flags |= MSG_MORE; + } +#endif + +// Ignore SIGPIPE as the send() return value is always checked for error +#ifdef MSG_NOSIGNAL + flags |= MSG_NOSIGNAL; +#endif + + while (bytesToSend > 0) { + ssize_t bytesSent = 0; + SYSCHECK_ERR_RETURN_NEG1( + bytesSent = ::send(socket, currentBytes, bytesToSend, flags)) + if (bytesSent == 0) { + C10_THROW_ERROR(DistNetworkError, "failed to send, sent 0 bytes"); + } + + bytesToSend -= bytesSent; + currentBytes += bytesSent; + } +} + +template +void recvBytes(int socket, T* buffer, size_t length) { + size_t bytesToReceive = sizeof(T) * length; + if (bytesToReceive == 0) { + return; + } + + auto currentBytes = reinterpret_cast(buffer); + + while (bytesToReceive > 0) { + ssize_t bytesReceived = 0; + SYSCHECK_ERR_RETURN_NEG1( + bytesReceived = recv(socket, currentBytes, bytesToReceive, 0)) + if (bytesReceived == 0) { + C10_THROW_ERROR(DistNetworkError, "failed to recv, got 0 bytes"); + } + + bytesToReceive -= bytesReceived; + currentBytes += bytesReceived; + } +} + +// send a vector's length and data +template +void sendVector(int socket, const std::vector& vec, bool moreData = false) { + SizeType size = vec.size(); + sendBytes(socket, &size, 1, true); + sendBytes(socket, vec.data(), size, moreData); +} + +// receive a vector as sent in sendVector +template +std::vector recvVector(int socket) { + SizeType valueSize = 0; + recvBytes(socket, &valueSize, 1); + std::vector value(valueSize); + recvBytes(socket, value.data(), 
value.size()); + return value; +} + +// this is only for convenience when sending rvalues +template +void sendValue(int socket, const T& value, bool moreData = false) { + sendBytes(socket, &value, 1, moreData); +} + +template +T recvValue(int socket) { + T value; + recvBytes(socket, &value, 1); + return value; +} + +// send a string's length and data +inline void sendString( + int socket, + const std::string& str, + bool moreData = false) { + SizeType size = str.size(); + sendBytes(socket, &size, 1, true); + sendBytes(socket, str.data(), size, moreData); +} + +// receive a string as sent in sendString +inline std::string recvString(int socket) { + SizeType valueSize = 0; + recvBytes(socket, &valueSize, 1); + std::vector value(valueSize); + recvBytes(socket, value.data(), value.size()); + return std::string(value.data(), value.size()); +} + +} // namespace tcputil +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c10e5007b9f54495cec904c1d72d2247a70e60b1 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Work.hpp @@ -0,0 +1,165 @@ +#pragma once + +#include +#include +#include +#include + +constexpr auto kNoTimeout = std::chrono::milliseconds(0); + +namespace c10d { + +constexpr const char* const kSeqNumStoreKey = "SEQ_NUM_STORE_KEY"; + +enum class OpType : std::uint8_t { + BROADCAST = 0, + ALLREDUCE = 1, + ALLREDUCE_COALESCED = 2, + REDUCE = 3, + ALLGATHER = 4, + _ALLGATHER_BASE = 5, + ALLGATHER_COALESCED = 6, + GATHER = 7, + SCATTER = 8, + REDUCE_SCATTER = 9, + ALLTOALL_BASE = 10, + ALLTOALL = 11, + SEND = 12, + RECV = 13, + RECVANYSOURCE = 14, + BARRIER = 15, + _REDUCE_SCATTER_BASE = 16, + COALESCED = 17, + _ALLREDUCE_SPARSE = 18, + UNKNOWN = 100, +}; + +// Converts OpType to human 
readable string. +TORCH_API std::string opTypeToString(OpType opType); + +// Whether or not an OP is an p2p op (SEND, RECV, RECVANYSOURCE) +TORCH_API bool isP2POp(OpType opType, bool batchP2P = false); + +// Please do not use Work API, it is going away, to be +// replaced by ivalue::Future. +// Python binding for this class might change, please do not assume +// this will be bound using pybind. +class TORCH_API Work : public torch::CustomClassHolder { + public: + Work( + int rank = -1, + OpType opType = OpType::UNKNOWN, + const char* profilingTitle = nullptr, + const std::optional>& inputTensors = + std::nullopt); + + ~Work() override; + + // Checks if request has completed. Non-blocking operation. + virtual bool isCompleted(); + + // Returns if the work completed successfully. + // If false, the exception function can be called to get details. + virtual bool isSuccess() const; + + // Returns exception if isSuccess() returned false. + virtual std::exception_ptr exception() const; + + // Returns source rank if this objects represents a recv-from-any. + virtual int sourceRank() const; + + // Returns result tensors, if applicable. + // If work is not supposed to have result, we return empty list. + virtual std::vector result(); + + // Ensures that operations on the output tensors that are invoked + // after this function returns are correctly sequenced after the + // asynchronous completion of this work. + // + // For CUDA tensors, it inserts stream synchronization such that + // the streams of the caller wait for completion of the + // asynchronous operations on the destination tensors. + // + // For CPU tensors, it is currently a nop. + // + // This function should only be used if the caller polls for + // completion through the `isCompleted` function, it has returned + // true, and the `isSuccess` function also has returned true. + // + virtual void synchronize(); + + // Waits until request completes. Blocking operation. 
+ // Throws if the work completed with an exception. + // Returns false if the work is aborted. + // Otherwise, it always returns true, indicating the work is completed. + // + // Functionally equivalent to: + // + // while (!isCompleted()) { /* nop */ } + // auto success = isSuccess(); + // if (!success) { std::rethrow_exception(exception()); } + // return success; + // + virtual bool wait(std::chrono::milliseconds timeout = kNoTimeout); + + virtual void abort(); + + // Returns a Future object that will be associated with the completion of + // work. Only NCCL backend is currently supported. + virtual c10::intrusive_ptr getFuture(); + + virtual float getDuration() const; + + virtual uint64_t getSequencenumber() const; + + OpType retrieveOpType() const; + + static c10::intrusive_ptr create_from_future( + const c10::intrusive_ptr&); + + protected: + // Completes the work object and optionally sets the exception in a + // thread-safe manner. Notifies all waiting condition variables as well. + void finish(std::exception_ptr exception = nullptr); + + // Similar to finish, but throws an exception if one is already set or + // provided by the user. + void finishAndThrow(std::exception_ptr exception); + + mutable std::mutex mutex_; + std::condition_variable cv_; + bool completed_ = false; + std::exception_ptr exception_; + + // Current rank of the node. + const int rank_; + + // Operation type that this work object refers to. + OpType opType_; + + // When profiling, the callback to record end of operation event. This + // callback needs to be called when collective operation is complete. 
+ std::function recordFunctionEndCallback_; +}; + +struct TORCH_API WorkInfo { + WorkInfo( + const OpType& opType, + const uint64_t seq, + const std::chrono::time_point& timeStarted, + const std::chrono::time_point& timeFinished, + const std::chrono::duration& activeDuration) + : opType(opType), + seq(seq), + timeStarted(timeStarted), + timeFinished(timeFinished), + activeDuration(activeDuration) {} + + OpType opType; + uint64_t seq; + std::chrono::time_point timeStarted; + std::chrono::time_point timeFinished; + std::chrono::duration activeDuration; +}; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..8524191515190083dc1f3063405533bd2782e315 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h @@ -0,0 +1,23 @@ +// Copyright (c) Meta Platforms, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +namespace c10d { + +enum class DebugLevel { Off = 0, Info = 1, Detail = 2 }; + +TORCH_API void setDebugLevel(DebugLevel level); + +// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG` +// environment variable. 
+TORCH_API void setDebugLevelFromEnvironment(); + +TORCH_API DebugLevel debug_level() noexcept; + +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h new file mode 100644 index 0000000000000000000000000000000000000000..fff2b45c4c952b99b3ba2f27696cb6d2b9c29326 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/error.h @@ -0,0 +1,56 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include + +#include + +namespace fmt { + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_category& cat, FormatContext& ctx) + const { + if (std::strcmp(cat.name(), "generic") == 0) { + return fmt::format_to(ctx.out(), "errno"); + } else { + return fmt::format_to(ctx.out(), "{} error", cat.name()); + } + } +}; + +template <> +struct formatter { + constexpr decltype(auto) parse(format_parse_context& ctx) const { + return ctx.begin(); + } + + template + decltype(auto) format(const std::error_code& err, FormatContext& ctx) const { + return fmt::format_to( + ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message()); + } +}; + +} // namespace fmt + +namespace c10d { +namespace detail { + +inline std::error_code lastError() noexcept { + return std::error_code{errno, std::generic_category()}; +} + +} // namespace detail +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp new 
file mode 100644 index 0000000000000000000000000000000000000000..aa3c40ae95bbf2f453d50712a108799d6eba72fe --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/reducer.hpp @@ -0,0 +1,587 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef _WIN32 +#include +#endif + +namespace c10d { + +constexpr int kDefaultFirstBucketBytes = int(1024 * 1024); +constexpr int kDefaultBucketBytesCap = int(25 * 1024 * 1024); +// Collect runtime stats once for every kDDPRuntimeLoggingSampleRate iterations. +constexpr int kDDPRuntimeLoggingSampleRate = 100; + +// Forward declaration +class Logger; + +// Local accumulator type for a single bucket. +struct BucketAccumulator { + std::vector indices; + size_t size = 0; + size_t size_limit = 0; +}; + +class TORCH_API Reducer { + public: + // The constructor takes a list of variables (i.e. parameters) for this + // process's single model replica (as DDP assumes single-process + // single-device). The bucket assignment for this reducer, `bucket_indices`, + // is specified as a list of buckets, each of which is specified as a list of + // indices into the bucket's `variables` list. + explicit Reducer( + std::vector params, + std::vector> bucket_indices, + const std::vector& per_bucket_size_limits, + c10::intrusive_ptr process_group, + std::vector expect_sparse_gradients, + int64_t bucket_bytes_cap, + bool find_unused_parameters, + bool gradient_as_bucket_view, + std::unordered_map param_names, + int64_t first_bucket_bytes_cap); + + ~Reducer() noexcept(false); + + // To (re-)initialize bucket assignment, pass a list of buckets, each of + // which is specified by a list of indices in the bucket's `variables` list. 
+ // This function performs validation that the variables within a bucket + // all live on the same device and have the same dimensionality. + void initialize_buckets(std::vector> bucket_indices); + + void autograd_hook(size_t index); + + // This function is called when the forward function has produced an output, + // and the user wishes to reduce gradients in the backwards pass. + // If they don't, and wish to accumulate gradients before reducing them, + // a call to this function can simply be omitted. + void prepare_for_backward(const std::vector& outputs); + + // Called at the beginning of forward() inside DistributedDataParallel, + // right now it captures the starting time of forward in each iteration. + void prepare_for_forward(); + + // Returns the relative time in nanoseconds when gradients were ready, + // with respect to the time `prepare_for_backward` was called. The + // vector is for parameters for a single model replica. + std::vector get_backward_stats() const { + return backward_stats_; + } + + // Registers a hook to the reducer. The hook is `CommHookInterface` + // type to allow both Python and CPP hooks. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_builtin_comm_hook`. + void register_comm_hook(std::unique_ptr iface); + + // Registers a built-in C++ comm hook to the reducer. This function can only + // be called once before calling backward. + // Cannot combine with the call of `register_comm_hook`. + void register_builtin_comm_hook(c10d::BuiltinCommHookType comm_hook_type); + + // Informs reducer that optimizer is running in backward, so gradients + // don't need to be copied from buckets as the optimizer would've already + // been applied. + void set_optimizer_in_backward() { + optim_in_backward_ = true; + }; + + // Runs allreduce or installed communication hook given GradBucket instance. 
+ c10::intrusive_ptr run_comm_hook( + GradBucket& grad_bucket); + + // Runs default allreduce hook. + c10::intrusive_ptr run_allreduce_hook( + GradBucket& grad_bucket); + + // Returns gradient buckets in sequential order of buckets_. This is the order + // in which buckets are reduced across processes. If return_zero_tensors=true, + // will return zero tensors of the same shape instead of the true tensors. + std::vector get_grad_buckets( + bool return_zero_tensors = true) const; + + // Rebuild buckets based on rebuilt_params_ and rebuilt_param_indices_ + // according to when tensors received grads in the backward pass. + // TODO this function makes broadcast communication call and + // could be overlapped with next forward() call, thus + // it could be async. Will make it async when rebuilding buckets for + // find_unused_parameters = true case, as we could rebuild buckets more than + // once for find_unused_parameters = true case, where subgraphs are trained + // and parameter indices order may change more frequently. + // For find_unused_parameters = false case, buckets are only rebuilt once, + // the performance cost is negligible. Returns true if the buckets were + // rebuilt. + bool rebuild_buckets(); + + void setSparseMetadata(std::map& metadata); + + // Install futures that should be awaited at end of backwards. Currently these + // are only used by user-defined custom buffer reduction hooks, but can be + // generalized to any user-originating futures that need to be awaited. + void install_futures(c10::List> futs); + + // Returns true if we should rebuild buckets, else false. We only rebuild + // buckets once after the first iteration and never rebuild them if + // find_unused_parameters_. + inline bool should_rebuild_buckets() const { + return (static_graph_ || !find_unused_parameters_) && !has_rebuilt_bucket_; + } + + // Pushes all parameters to be rebuilt. 
+ void push_rebuilt_params_for_all_indices(); + + // Creates and sets ForwardPassWorkHandle given a Work and the + // corresponding tensor being reduced. + void set_forward_pass_work_handle( + c10::intrusive_ptr forwardPassWorkHandle, + bool useStaticWorldSize); + + // Retrieve on-device tensors used to track locally unused parameters. It is + // a tensor where index i = 1 if the Variable with that index has been used. + at::Tensor get_local_used_map_on_device() const; + + // An function for users to set sample_rate of collecting + // runtime stats. The time stats will be recorded for the + // first 10 iterations, after 10 iterations time stats will be + // recorded once every "sample_rate" training iterations. + void set_ddp_runtime_logging_sample_rate(int sample_rate); + + // Specify the training graph is static. + void set_static_graph(); + + // Delay all reduce to be after all gradients' calculation is complete. + void delay_all_reduce(); + + void set_mixed_precision_param_dtype(c10::ScalarType dtype); + + // Weak reference to associated DDP logger. The reference is weak to avoid + // refcycle between reducer and logger. + void set_logger(std::weak_ptr logger); + + // When graph is not explicitly set by user as static and has unused + // parameters, this will return whether the graph has been static until the + // current iteration, which means unused params set has not changed. + bool ddp_graph_static(); + + // Removes autograd hooks registered by the Reducer on the model parameters. + void remove_autograd_hooks(); + + // Checks whether or not the reducer has finalized the current backward + // iteration. + void check_finalized(); + + // Updates the underlying process group used by DDP with the new process + // group. + void update_process_group( + c10::intrusive_ptr new_process_group); + + // Resets reducer state. + void reset_state(); + + protected: + // Forward declaration. 
+ struct Bucket; + + void push_rebuilt_params(const size_t& index); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + mutable std::mutex mutex_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const std::vector params_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::intrusive_ptr<::c10d::ProcessGroup> process_group_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector expect_sparse_gradients_; + + std::vector> + grad_accumulators_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::unordered_map gradAccToVariableMap_; + std::vector>> + hooks_; // NOLINT(cppcoreguidelines-non-private-member-variables-in-classes) + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool expect_autograd_hooks_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool require_finalize_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + size_t next_bucket_; + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool has_marked_unused_parameters_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool find_unused_parameters_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const bool gradient_as_bucket_view_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector unused_parameters_; + // Previous iteration's unused params, used for checking if unused parameters + // change between iterations. Only filled during the first backwards call. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector prev_iteration_unused_parameters_; + // Whether graph is static or not. 
When user does not explicitly set static + // graph, the only possible dynamism is set of unused parameters changing + // between iterations which is tracked by this flag. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool ddp_graph_static_{true}; + // Locally used parameter maps indicating if parameters are used locally + // during the current iteration or no_sync session if no_sync is on. + // Each map is a one-dim int32 tensor of number of parameters. These tensors + // are marked in autograd_hook to indicate the corresponding param has been + // used, and get allreduced in the end of backward step of current iteration + // or no_sync session for figuring out the globally unused parameters. + // + // local_used_map_: CPU tensor for bookkeeping locally used params + // local_used_map_dev_: dev tensor for reducing globally unused params + at::Tensor local_used_map_; + at::Tensor local_used_map_dev_; + // Indicate that reduction is done and D2H copy is done as well. + bool local_used_map_reduced_; + + // Weak pointer to associated DDP logger. + std::weak_ptr logger_; + // List of futures installed by Reducer::install_futures that should be + // awaited at the end of backwards pass. + std::optional>> + installed_futures_{std::nullopt}; + // Mixed precision parameter dtype for bucket type checking. + std::optional mixed_precision_param_dtype_{std::nullopt}; + + // Work handle for allreduce on local_used_map_ + c10::intrusive_ptr local_used_work_; + + void mark_variable_ready_dense(size_t variable_index); + + void mark_variable_ready_sparse(size_t variable_index); + + void mark_variable_ready(size_t variable_index); + + void mark_bucket_ready(size_t bucket_index); + + void finalize_bucket_dense(Bucket& bucket); + + void finalize_backward(); + + // Returns list of model parameters corresponding to the given bucket. + // bucket_index is a key to cache after buckets are rebuilt, after which this + // mapping never changes. 
+ std::vector get_variables_for_bucket( + size_t bucket_index, + const Bucket& bucket) const; + + // Asserts that the reduction for the previous iteration has finished before + // rebuilding buckets or kicking off the next one. + void ensure_prior_reduction_finished(); + + // Broadcast rebuilt buckets from rank 0 to other ranks before initializing + // the buckets + void sync_bucket_indices(std::vector>& bucket_indices); + + // We'd like to use DistAutogradContext::GradCallback here but dist autograd + // doesn't exist under Windows. So we just directly use the concrete type but + // to preserve and enforce our original intent we do a static assert when dist + // autograd is available. + using GradCallback = std::function; +#ifndef _WIN32 + static_assert( + std::is_same_v< + GradCallback, + torch::distributed::autograd::DistAutogradContext::GradCallback>); +#endif + void runGradCallbackForVariable(at::Tensor& variable, GradCallback&& cb); + + // This function is called inside `initialize_buckets()`. It initializes both + // `bucket_views_in` and `bucket_views_out` with views for each variable's + // gradient into the bucket's flattened `gradients` tensor. Views serve as + // entry points to `copy_()` each grad's data in/out of the flattened + // `gradients` tensor. + void initialize_bucket_views(Bucket& bucket); + + // This function is called inside `finalize_backward`, it happens only if + // DDP communication hook was registered to recreate just bucket_views_out + // with the result of `future_work`. + void populate_bucket_views_out(Bucket& bucket, at::Tensor& tensor); + + // If gradient_as_bucket_view_ is false, after allreduce buckets, + // copy bucket results back to grads. + void copy_bucket_to_grad( + at::Tensor& variable, + Reducer::Bucket& bucket, + size_t intra_bucket_index, + bool global_unused); + // Check layout of grad and bucket_view before copying the grad to bucket. 
+ void check_grad_layout(const at::Tensor& grad, const at::Tensor& bucket_view); + + // A bucket contains [1..N] gradients to be reduced, where the gradients + // have the same dtype and device. + // Coalescing gradients together before reducing can result in lower overhead + // and/or faster time to completion. Coalescing requires the constituent + // gradients to have the same dtype and device, and the resulting flattened + // tensor uses that common dtype and device. The flattened tensor is filled + // as the corresponding gradients are computed (triggered by autograd hooks), + // and the buckets are reduced in a predetermined order consistent across + // processes. + struct Bucket { + // Gradients of the bucket flattened into a 1-dimensional tensor + at::Tensor gradients; + + // Views into the `gradients` tensor for each individual gradient + // Each view is created with layout (size and stride) matching the + // gradient's expected layout (see the "Gradient Layout Contract" in + // torch/csrc/autograd/functions/accumulate_grad.h). + // `bucket_views_in[i].copy_(grad)` and `grad.copy_(bucket_views_out[i])` + // provide convenient ways to copy gradient data in/out of `gradients`, + // respectively. + // We keep both `bucket_views_in` and `bucket_views_out` because + // registering a DDP communication hook may re-initialize + // `bucket_views_out` with the value of the hook's `future_work` but we + // still need separate views into the bucket's original flattened gradient + // to copy in gradient data. + std::vector bucket_views_in; + std::vector bucket_views_out; + + // Variables whose gradients are held in this bucket + // We use refcounted tensors here so that we can easily unflatten the + // bucket's flattened `gradients` tensor into the participating variables + // after reduction has completed. 
+ std::vector variables; + + // Per-variable offset/length into the flattened `gradients` tensor and + // the corresponding `GradBucket` instance for communication hooks + std::vector offsets; + std::vector lengths; + + // Per-variable sizes slicing into the bucket's `gradients` tensor + std::vector sizes_vec; + + // Number of gradients left to be computed before the bucket is ready to + // be reduced + size_t pending; + + // Global indices of participating variables in the bucket + std::vector variable_indices; + + // Future work handle for DDP communication hook + // If no hook is registered, a temporary vanilla allreduce hook is used. + c10::intrusive_ptr future_work; + + // If this bucket should expect a single sparse gradient + // If `true`, then this implies that `bucket.variables.size() == 1`. + bool expect_sparse_gradient = false; + + // Sparse indices tensor + std::optional sparse_tensor_indices = std::nullopt; + + // TODO(@pietern) + // Memory copies from gradient tensors into the bucket are potentially + // done on different CUDA streams. We record an event for every copy + // so that we can synchronize with them prior to kicking off the reduction. + // std::vector events; + }; + + std::vector buckets_; + + // A variable locator locates a particular variable in the reducer's buckets + struct VariableLocator { + // Index of the bucket containing the variable in the `buckets_` vector + size_t bucket_index; + // Index of the variable in the bucket, which may be used consistently + // across `bucket_views_in`, `bucket_views_out`, `variables`, `offsets`, + // `lengths`, `sizes_vec`, and `variable_indices` in `Bucket` + size_t intra_bucket_index; + + VariableLocator() = default; + + VariableLocator(size_t bucket_index_, size_t intra_bucket_index_) + : bucket_index(bucket_index_), + intra_bucket_index(intra_bucket_index_) {} + }; + + // Map the index of a variable to its location in the bucket structure. 
+ std::vector variable_locators_; + + // track the number of iterations to synchronize grads in training so far. + long num_iterations_; + // track distinct iteration of backward call. This is distinct from + // num_iterations_, for example in the case of multiple forward before + // backward. + long num_bwd_calls_; + // whether the first autograd hook for a distinct backward pass has been + // called. + bool first_autograd_hook_called_; + // track the number of buckets that have been ready for + // communication calls like allReduce or communication hooks. + int num_buckets_ready_; + + // Timing information. + int64_t backward_compute_start_time_ = -1; + std::unique_ptr timer_; + + // We collect the relative timestamp of every gradient being ready + // when executing autograd. This can be used to derive a timeline of + // the point in time buckets were ready, or ideal bucket assignment/ordering. + std::vector backward_stats_; + + bool should_collect_runtime_stats(); + void record_forward_compute_start_time(); + void record_backward_compute_start_time(); + void record_backward_compute_end_time(); + void record_backward_comm_start_time(); + void record_backward_comm_end_time(); + + int get_ddp_runtime_logging_sample_rate(); + int ddp_runtime_logging_sample_rate_ = kDDPRuntimeLoggingSampleRate; + + bool is_multi_device_module_ = false; + + // Following variables are to help build dynamic bucket order + bool has_rebuilt_bucket_; + std::vector rebuilt_params_; + std::vector rebuilt_param_indices_; + const int64_t bucket_bytes_cap_; + +#ifndef _WIN32 + struct RpcContext { + using ContextPtr = torch::distributed::autograd::ContextPtr; + // The shared_ptr is to hold the context instance. + ContextPtr context_ptr_holder; + std::atomic context_ptr{nullptr}; + + void set(ContextPtr&& new_context_ptr); + }; + RpcContext rpc_context_; +#endif + + // A struct containing work handle and tensor for allreduce scheduled in + // forward pass, if applicable. 
+ struct ForwardPassAllreduceWork { + c10::intrusive_ptr workHandle; + at::Tensor resultTensor; + // whether we should divide by the initial world_size or the no. of + // remaining DDP ranks. + bool useStaticWorldSize; + }; + + // Handle for the currently scheduled allreduce in the forward pass, if + // applicable. + ForwardPassAllreduceWork forwardPassWorkHandle_; + + // Division factor for reduction of gradients. + // Equal to the process group size, with an exception of handling uneven + // input. + int div_factor_; + + bool static_graph_; + + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() should be triggered before marking this variable's grad as + // ready for communication. Map will not change after 1st iteration. + std::unordered_map numGradHooksTriggeredMap_; + // Key: size_t (index), Value: the number of times that a variable's + // autograd_hook() are left to be triggered before marking this variable's + // grad as ready for communication. Map will change after 1st iteration to + // track a grad is ready for communication or not. + std::unordered_map numGradHooksTriggeredMapPerIteration_; + + private: + // reset counting for buckets before backward starts + void reset_bucket_counting(); + // search unused parameters beore backward starts + void search_unused_parameters( + const std::vector& outputs); + void set_divide_factor(); + // kick off all reduce for the ready bucket + void all_reduce_bucket(Bucket& bucket); + // kick off all reduce to local used map, it can help find global unused + // parameters + void all_reduce_local_used_map(); + // initialize locally used parameter maps + void initialize_local_used_map(); + // get current cuda stream + const c10::Stream get_current_stream(); + bool dynamic_graph_find_unused(); + bool static_graph_first_iteration(); + bool static_graph_after_first_iteration(); + + // comm_hook_ is used to access the DDP communication hook if registered. 
+ std::unique_ptr comm_hook_; + + // Sparse metadata contains the indices that will be used + // when calling into sparse allreduce. + // This is only used in the sparse allreduce collective calls + std::unique_ptr> sparse_metadata_; + + // Debug level setting. It is parsed once when Reducer is constructed, and + // remains the same across a single invocation of DDP training. + DebugLevel ddp_debug_level_; + // Mapping of variable index to fully qualified name of model to notify users + // about errors when certain parameters do not get gradient. + std::unordered_map param_names_; + // Variable indices stored sequentially in order of when the gradient is ready + // for the current backwards pass. + std::vector grad_ready_order_indices_; + // Bytes capacity of first bucket, can be configured by user + int64_t first_bucket_bytes_cap_; + // Per iteration set of parameter indices that have been marked ready. + std::unordered_set perIterationReadyParams_; + // Retrieves parameter names that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamsForIteration(); + // Retrieves parameter indices that have not been marked as ready as part of + // previous iteration. + std::vector getUnmarkedParamIndicesForIteration(); + // Raises appropriate error if mark_variable_ready is called on the same + // variable twice, which is unexpected. + void checkAndRaiseMarkedTwiceError(size_t curVariableIndex); + // Retrieves parameter corresponding to the given VariableIndex. + at::Tensor& get_param_from_index(size_t index); + + // Cached bucket index to model parameter mapping. Populated after buckets + // are rebuilt after which this mapping is static. + mutable std::unordered_map> + cached_variables_for_bucket_; + + bool optim_in_backward_{false}; + friend class Logger; +}; + +// This is equivalent to take_tensors but returns indices into the +// tensor list argument for bucket assignment. 
Also, it is aware +// of device placement and will not allow buckets to span devices. +// The index of tensors[i] assigned to bucket is tensor_indices[i], +// when tensor_indices is empty, the index of tensors[i] assigned to +// bucket is i. +TORCH_API std::tuple>, std::vector> +compute_bucket_assignment_by_size( + const std::vector& tensors, + const std::vector& bucket_size, + const std::vector& expect_sparse_gradient = {}, + const std::vector& tensor_indices = {}, + const std::optional>& logger = {}); + +// Verify models across all processes are the same as model on rank 0 with +// respect to no. of params and matching dtype/size/layout. +TORCH_API void verify_params_across_processes( + const c10::intrusive_ptr& process_group, + const std::vector& params, + const std::optional>& logger); +} // namespace c10d diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h new file mode 100644 index 0000000000000000000000000000000000000000..2c9fd3433d045e68e990408bdd0e18bae305c671 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE }; + +// Python wrapper of an RRef shared_ptr that supports Python +// pickle and unpickle. +class PYBIND11_EXPORT PyRRef { + public: + // The first ctor can only be called while holding GIL. See its implementation + // for more explanations. 
+ explicit PyRRef(const py::object& value, const py::object& type_hint); + explicit PyRRef(c10::intrusive_ptr rref); + PyRRef(const PyRRef&) = default; + ~PyRRef(); + + bool isOwner() const; + bool confirmedByOwner() const; + WorkerInfo owner() const; + std::string ownerName() const; + py::object toHere( + const float timeoutSeconds = + torch::distributed::rpc::kUnsetRpcTimeout) const; + py::object localValue() const; + std::string str() const; + py::tuple pickle() const; + static PyRRef unpickle(const py::tuple& t); + c10::IValue toIValue() const; + // Future that is associated with the creation of this RRef on the remote end. + // This is only used to get the future corresponding to the rref for profiling + // use cases. + c10::intrusive_ptr getFuture() const; + // Keeps track of the future responsible for profiling owner creation + // acknowledgement + c10::intrusive_ptr getProfilingFuture() const; + // Sets the future responsible for profiling owner creation acknowledgement. + // This future is set from python to be a future that returns when profiling + // callbacks have been run. + void setProfilingFuture(c10::intrusive_ptr profilingFuture); + + // create a proxy on this RRef, which can be used to launch RPC on the owner + // of this RRef to run functions on the object referenced by this RRef. + py::object createRRefProxy( + const RRefProxyType& mode, + float timeoutSeconds = rpc::kUnsetRpcTimeout) const; + + // get the type of the data object referenced by this RRef. Timeout argument + // is only used in the first invocation of this function as an argument to the + // RPC to the owner node of the RRef. + py::object getRRefType( + float timeout = rpc::kUnsetRpcTimeout, + bool blocking = true); + + // Run the backward pass with the RRef as the root. + void backward(int64_t autogradContextId, bool retainGraph); + + // Helper static function to run backward on a given rref. 
+ static void backward( + int64_t autogradContextId, + bool retainGraph, + const c10::intrusive_ptr& rref); + + // Specialization of backward if the rref is an OwnerRRef. + static void backwardOwnerRRef( + int64_t autogradContextId, + bool retainGraph, + IValue value); + + private: + c10::intrusive_ptr rref_; + std::optional> profilingFuture_; + std::optional type_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h new file mode 100644 index 0000000000000000000000000000000000000000..3282e8c0e108f5ddf86ad2c135f1d1b1ba4097b2 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h @@ -0,0 +1,335 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace torch::distributed::rpc { + +namespace callback { +// It's the callback for RemoteCall. +void TORCH_API +confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId); + +// It's the callback for finishing creating owner rref, it returned deletedRRef, +// so that the deletedRRef can be handled under GIL in python_functions.cpp if +// deletedRRef contains python object. +c10::intrusive_ptr TORCH_API +finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId); +} // namespace callback + +// Manages RRef lifetime and keeps track of RRef forks. +class TORCH_API RRefContext { + public: + static RRefContext& getInstance(); + // NB: This method must be called before destructing RRefContext singleton. + // Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that + // hold py::object. The call-site is also responsible for resetting those + // shared_ptr objects with a GIL. See comments at delForkOfOwner() for more + // details. 
+ static std::vector> destroyInstance( + bool ignoreRRefLeak = true); + + static void handleException(const JitFuture& jitFuture); + + // handle exception without throw ::c10::Error again + static void handleExceptionSilent(const JitFuture& jitFuture); + + RRefContext(const RRefContext&) = delete; + RRefContext(RRefContext&& other) = delete; + void operator=(const RRefContext&) = delete; + RRefContext& operator=(RRefContext&& other) = delete; + + ~RRefContext(); + + // get the worker id of the current worker + inline worker_id_t getWorkerId() const { + return agent_->getWorkerInfo().id_; + } + + // get the worker name of the current worker + inline const std::string& getWorkerName() const { + return agent_->getWorkerInfo().name_; + } + + // generate a globally unique ID + inline GloballyUniqueId genGloballyUniqueId() { + return GloballyUniqueId(getWorkerId(), nextLocalId_++); + } + + inline const std::shared_ptr& agent() const { + return agent_; + } + + // create a ``UserRRef`` owned by the worker ``ownerId`` + c10::intrusive_ptr createUserRRef( + worker_id_t ownerId, + const TypePtr& type); + + // Convert an RRefForkData into an RRef. This RRef could be user or owner. + // This RRef could have already existed before, or could be created in this + // method, we pass type here to validate or help the rref creation. + c10::intrusive_ptr getOrCreateRRef( + const RRefForkData& rfd, + const TypePtr& type); + + // Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new + // one. This function is called in two places: + // 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL`` + // ``PYTHON_REMOTE_CALL``. + // 2. when unpickling ``OwnerRRef``. + // What's common in these two cases are, 1) the RRefId is already generated + // 2) the TypePtr is presented. So it can always create the ``OwnerRRef`` if + // it is not yet available. 
+ c10::intrusive_ptr getOrCreateOwnerRRef( + const RRefId& rrefId, + const TypePtr& type); + + // Create an empty owner rref of type. + // This method is called to first time generate an ``OwnerRRef``, e.g., + // 1) ``rpc.RRef(obj)`` + // 2) create the ``OwnerRRef`` on `rpc.remote()` caller side. + // What's common in these two cases are, 1) the RRefId hasn't been generated + // 2) the TypePtr is presented. + c10::intrusive_ptr createOwnerRRef(const TypePtr& type); + + // Returns a Future of the OwnerRRef, which will be marked completed when + // ``OwnerRRef`` is created. This method is used when the TypePtr is not + // available, e.g., when processing to_here(). The forceCreated flag can be + // used to ensure that the rref is created on the owner, otherwise throw in + // cases where the user of this API expects this to return a completed future. + // Note that the return value is a intrusive_ptr to a c10::ivalue::Future that + // holds the RRef. + c10::intrusive_ptr getOwnerRRef( + const RRefId& rrefId, + bool forceCreated = false); + + // Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when + // making a remote call to self, which as for now, still goes through serde + // and invokes request callback. In this case, the OwnerRRef has already been + // created on the send side, and we need to pass it to the receive side, + // instead of creating a new OwnerRRef. This is done by adding the OwnerRRef + // into owners_. However, that alone is not enough, as it could be deleted + // when all UserRRef die, which would then remove the OwnerRRef from owners_ + // and this could happen before the self remote call finishes. To prevent + // that, this API adds the RRefId as a ForkId, which will then delete the + // ForkId when the self remote is done. + void addSelfAsFork(c10::intrusive_ptr& rref); + + // Register a fork of the ``OwnerRRef``, and inserts a intrusive_ptr of the + // ``OwnerRRef`` in a map to keep it alive. 
+ void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId); + // Performs the same function as addForkOfOwner but ignores duplicate + // requests. This idempotent function is used with RREF_FORK_REQUEST calls, + // whereas all other message types use the non-idempotent variant. + void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId); + // Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion on the + // IValue or py::object. For the later, this method will acquire GIL. + // NB: If this fork deletion triggered deleting OwnerRRef, this method will + // return a shared_ptr to the OwnerRRef, which is likely to be the last + // shared_ptr instance for it. Therefore, deleting this shared_ptr + // will also trigger deleting the object it points to. If OwnerRRef holds a + // py::object, deleting it require GIL. The call site should guarded it with + // a GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally + // left out of this function to avoid creating dependency on pybind. + c10::intrusive_ptr delForkOfOwner( + const RRefId& rrefId, + const ForkId& forkId); + + // Invoked when pickling an RRef to setup child/fork properly + RRefForkData prepareChildFork(const c10::intrusive_ptr& rref); + // Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and + // send RREF_CHILD_ACCEPT to the parent. + // NB: forkId is necessary here as the rref could be an OwnerRRef + void notifyOwnerAndParentOfFork( + const ForkId& forkId, + worker_id_t parent, + const c10::intrusive_ptr& rref); + + // When a UserRRef is forked to another worker (user or owner), it is added + // into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT + // from the child. + // NB: This is necessary for both user and owner child. 
As we do not have FIFO + // communication between workers, we need this strategy to make sure that all + // previously submitted rpc/remote calls are acked before sending out the + // RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too + // soon. + void addPendingChild( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + void delPendingChild(const ForkId& forkId); + + // When a UserRRef is created, it is added into pendingUsers_ to be held alive + // until it receives RREF_USER_ACCEPT from the owner. + void addPendingUser( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + void delPendingUser(const ForkId& forkId); + void addConfirmedUser( + const ForkId& forkId, + const c10::intrusive_ptr& rref); + + // Retrieve a pending user given the fork ID. Throws if the user has already + // been confirmed (i.e. is no longer in the pendingUsers_ map). + c10::intrusive_ptr getPendingUser(const ForkId& forkId); + + // Start recording new pending UserRRefs. All pending UserRRefs introduced + // after this point will be put into the thread_local userTable_, which will + // then be consumed and cleared in waitForThreadLocalPendingRRefs(). + void recordThreadLocalPendingRRefs(); + // End recording new pending UserRRefs, and clear the thread_local userTable_. + // Returns a Future which will be marked as completed when all pending + // UserRRefs in the current userTable_ are confirmed by their owners. The bool + // value in the Future is unused. + // This method is useful to make sure RRefs in user function arguments are + // confirmed before launching user code. + // NB: Callers of this method does not need to keep the returned Future alive, + // because this Future is already captured in callbacks of the + // PendingUserState. If there is no pending UserRRefs, this method returns a + // completed future. 
+ c10::intrusive_ptr waitForThreadLocalPendingRRefs(); + // Only call this function when there are errors during a recording session, + // and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked + // properly. + // TODO: make this a context guard + void clearRecordedPendingRRefsOnError(); + + void delUser( + const worker_id_t owner, + const RRefId& rrefId, + const ForkId& forkId); + void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis); + + std::unordered_map getDebugInfo(); + + private: + struct PendingUserState { + PendingUserState(c10::intrusive_ptr rref) + : rref_(std::move(rref)), + confirmationFuture_(c10::make_intrusive(BoolType::get())) { + } + + inline void confirm() { + c10::static_intrusive_pointer_cast(rref_)->confirm(); + confirmationFuture_->markCompleted(); + } + + c10::intrusive_ptr rref_; + // Use Future.wait() and Future.markCompleted() to block and unblock user + // functions. The bool value wrapped by the future_ is not used. + c10::intrusive_ptr confirmationFuture_; + }; + + RRefContext(std::shared_ptr); + + c10::intrusive_ptr createUserRRef( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + const TypePtr& type); + + void finishForkRequest(const ForkId& forkId, worker_id_t parent); + + // If there is any leak on any RRef, this method will throw an error. + void checkRRefLeaks(bool ignoreRRefLeak); + + static std::atomic nextLocalId_; + + const std::shared_ptr agent_; + mutable std::mutex mutex_; + // Keep OwnerRRefs alive until there is no living UserRRefs. + std::unordered_map, RRefId::Hash> owners_; + // A map to track OwnerRRefs that are requested but not yet created. This can + // happen if the to_here() message is processed on the owner before the + // corresponding creator rpc.remote() message. 
If this happens, instead of + // to_here() RPC thread to block waiting for the OwnerRRef creation, the + // RRefContext returns a Future, so that the RPC request processing logic can + // attach subsequent code as a callback to that Future. + // NB: the OwnerRRefs in this map must be cleared when the corresponding + // OwnerRRef is created. Note that the values in this map are intrusive_ptrs + // to c10::ivalue::Future that will be marked completed with the owner RRef. + std::unordered_map, RRefId::Hash> + pendingOwners_; + // Tracks known living UserRRefs of an OwnerRRef + std::unordered_map< + RRefId, + std::unordered_set, + RRefId::Hash> + forks_; + + // This cond var is used by deleteAllUsers(), a event notification is sent if + // number of pending UserRRef or UserRRef children is reduced, or + // number of owned OwnerRRef is reduced. + std::condition_variable deleteAllUsersCV_; + // The follow 3 maps keep UserRRefs alive by holding a intrusive_ptr to the + // RRef instances. A UserRRef must be added into this map if any of the + // following two conditions is true: + // + // (1) A UserRRef has not been accepted by owner yet. + // + // It can be used or shared, but cannot be deleted, and hence kept alive + // in this map. A message of type RREF_USER_ACCEPT will move the + // corresponding RRef from pendingUsers_ map to confirmedUsers_ map. + std::unordered_map, ForkId::Hash> + pendingUsers_; + // UserRRefs are added into this map when it is confirmed by the owner. + // When destroying RRefContext this map helps to find local UserRRefs + // and send delete messages if they are still not deleted by Python + // garbage collection. + std::unordered_map, ForkId::Hash> + confirmedUsers_; + + // (2) A UserRRef has forked a child UserRRef which has not been accepted by + // the owner yet. 
+ // + // In this case, this UserRRef cannot send out RREF_USER_DELETE message, + // as it could potentially trigger the OwnerRRef been deleted before the + // owner learns about the forked child. + std::unordered_map, ForkId::Hash> + pendingChildren_; + + // The RRef context performs its operations through async RPC requests, in + // order to not block the user code. Therefore the RRef context's state may be + // lagging a bit behind what it is intended to be, while it waits for these + // requests to complete. To allow syncing when needed, we store the count of + // these pending requests, so that users can wait for it to reach zero. + std::atomic numPendingFutures_{0}; + + std::mutex destroyedMutex_; + bool destroyed_{false}; + + // Thread local states to keep UserRRefs deserialized from user function + // arguments. + static thread_local std::vector> userTable_; + // A flag indicating whether subsequently created UserRRefs should be added to + // the thread_local userTable_. The flag is set to true before serializing + // RPC arguments and then set to false before running the corresponding + // user code. See addPendingUser and delPendingUser for more details. + // NB: The reason for having this flag is because addPendingUser are called in + // two cases, and we only want to track the 2nd case. + // (1) RRef as the return value: when calling rpc.remote, the UserRRef on the + // caller side is added to the context using addPendingUser. + // (2) RRef as an argument: When running an RPC using RRefs as arguments, the + // RRef is forwarded to the callee as new UserRRefs (if the callee is not + // the owner). In this case, we block running the user function until all + // UserRRefs are confirmed by the owner. + // This contract gurantees that no UserRRefs can be used remotely without + // confirmation. Note that, however, the UserRRef created by rpc.remote can + // still be passed to local functions as arguments and used there. 
This is by + // design, because this feature is especially useful when, say a master node + // creates multiple UserRRefs in a loop and then shares them with other nodes. + // Blocking every iteration in the loop until RRefs are confirmed will slow + // this down. This nuance on UserRRef can be interpreted as we only make + // exceptions for UserRRef creators. And using the UserRRef on its creator + // without confirmation is OK, because the creator would either call to_here + // or forward the UserRRef, and both would then require confirmations from the + // owner. + static thread_local bool recording_; +}; + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..8a1634ca61f30acfaf661b19b8a52b3fbde3b21b --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h @@ -0,0 +1,416 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace torch::distributed::rpc { + +class RRef; +class RRefContext; +class UserRRef; + +constexpr int OWNER_IDX = 0; // index of ownerId in the tuple +constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple +constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple +constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple +constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple +constexpr int PARENT_IDX = 5; // index of parent in the tuple +constexpr int TYPE_IDX = 6; // index of parent in the tuple + +// NB: if more fields are added, make sure this field is also bumped +constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple + +// Represents fork of an RRef to be sent over the wire. 
+struct TORCH_API RRefForkData { + const worker_id_t ownerId_; + const RRefId rrefId_; + const ForkId forkId_; + const worker_id_t parent_; + const std::string typeStr_; + + RRefForkData( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + worker_id_t parent, + std::string typeStr); +}; + +// Note [RRef Protocol] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~ +// +// [Background] +// +// RRef stands for Remote REFerence. Each RRef is owned by a single worker +// (i.e., owner) and can be used by multiple users. The owner stores the real +// data referenced by its RRefs. RRef needs to support fast and scalable RPC. +// Hence, in the design, we avoid using a single global master to keep RRef +// states, instead owners will keep track of the global reference counts +// for its RRefs. Every RRef can be uniquely identified by a global RRefId, +// which is assigned at the time it is first created either on a user or on the +// owner. +// +// On the owner worker, there is only one OwnerRRef instance, which contains the +// real data, while on user workers, there can be as many UserRRefs as +// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef +// should retrieve the unique OwnerRRef instance using the globally unique +// RRefId. //A UserRRef will be created when it is used as an argument or return +// value in dist.rpc or dist.remote call, but RRef forking and reference +// counting (RC) are completely transparent to applications. Every UserRRef will +// also have its globally unique ForkId. +// +// [Assumptions] +// +// 1. Transient Network Failures +// +// TODO: current RRef implementation does not tolerate failures +// +// The RRef design handles transient network failures by retrying +// messages. Node crashes or permanent network partition is beyond the scope. +// When those incidents occur, the application may take down all workers, revert +// to the previous checkpoint, and resume training. +// +// 2. 
Non-idempotent UDFs +// +// We assume UDFs are not idempotent and therefore cannot be retried. However, +// internal RRef control messages are idempotent and retried upon message +// failure. +// +// TODO: RRef internal messages are not yet idempotent +// +// 3. Out of Order Message Delivery +// +// We do not assume message delivery order between any pair of nodes, because +// both sender and receiver are using multiple threads. There is no guarantee on +// which message will be processed first. +// +// [RRef Lifetime] +// +// The goal of the protocol is to delete an OwnerRRef at an appropriate time. +// The right time to delete an OwnerRRef is when there are no living UserRRefs +// and Python GC also agrees to delete the OwnerRRef instance on the owner. The +// tricky part is to determine if there are any living UserRRefs. +// +// A user can get a UserRRef in three situations: +// +// (1). Receiving a UserRRef from the owner. +// (2). Receiving a UserRRef from another user. +// (3). Creating a new UserRRef owned by another worker. +// +// (1) is the simplest case where the owner initiates the fork, and hence it can +// easily increment local RC. The only requirement is that any UserRRef must +// notify the owner before destruction. Hence, we need the first guarantee: +// +// G1. The owner will be notified when any UserRRef is deleted. +// +// As messages might come delayed or out-of-order, we need more one guarantee to +// make sure the delete message is not sent out too soon. Let us first introduce +// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef +// on A the parent RRef and the RRef on B the child RRef. +// +// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the +// owner. +// +// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply +// means that the user will not send out the delete message until all previous +// messages are ACKed. 
Note that ACKed does not mean the owner finishes +// executing the function, instead, it only means the owner has retrieved its +// local OwnerRRef and about to pass it to the function, which is sufficient to +// keep the OwnerRRef alive even if the delete message from the user arrives at +// the owner before the function finishes execution. +// +// With (2) and (3), it is possible that the owner only partially knows the RRef +// fork graph or not even knowing it at all. For example, the RRef could be +// constructed on a user, and before the owner receives the RPC call, the +// creator user might have already shared the RRef with other users, and those +// users could further share the RRef. One invariant is that the fork graph of +// any RRef is always a tree rooted at the owner, because forking an RRef always +// creates a new RRef instance, and hence every RRef has a single parent. One +// nasty detail is that when an RRef is created on a user, technically the owner +// is not its parent but we still consider it that way and it does not break the +// argument below. +// +// The owner's view on any node (fork) in the tree has three stages: +// +// 1) unknown -> 2) known -> 3) deleted. +// +// The owner's view on the entire tree keeps changing. The owner deletes its +// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when +// OwnerRRef is deleted, all UserRRefs could be either indeed deleted or +// unknown. The dangerous case is when some forks are unknown and others are +// deleted. +// +// G2 trivially guarantees that no parent UserRRef Y can be deleted before the +// owner knows all of Y's children UserRRefs. +// +// However, it is possible that the child UserRRef Z may be deleted before the +// owner knows its parent Y. More specifically, this can happen when all of Z's +// messages are processed by the owner before all messages from Y, including the +// delete message. Nevertheless, this does not cause any problem. 
Because, at +// least one of Y's ancestor will be alive, and it will prevent the owner from +// deleting the OwnerRRef. Consider the following example: (NB: this scenario +// will no longer relevant when we block UDF until all RRefs are confirmed by +// the owner) +// +// OwnerRRef -> A -> Y -> Z +// +// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted +// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as +// the owner directly forks the RRef to A. A won't die before the owner knows Y. +// +// Things get a little trickier if the RRef is created on a user: +// +// OwnerRRef +// ^ +// | +// A -> Y -> Z +// +// If Z calls to_here on the UserRRef, the owner at least knows A when Z is +// deleted, because otherwise to_here wouldn't finish. If Z does not call +// to_here, it is possible that the owner receives all messages from Z before +// any message from A and Y. In this case, as the real data of the OwnerRRef has +// not been created yet, there is nothing to be deleted either. It is the same +// as Z does not exist at all Hence, it's still OK. +// +// See #26759 for more details and discussions. +// +// TODO: make RRef an IValue, and edit createStackForSchema accordingly +// TODO: make RRef system messages idempotent and retry on failures. +// +// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``. +// Each ``RRef`` has a globally unique ``RRefId``. +class TORCH_API RRef : public RRefInterface { + public: + // RRef is made NOT copyable NOT movable to prevent messing up reference + // counting. 
+ explicit RRef(const RRef& other) = delete; + explicit RRef(RRef&& other) = delete; + RRef& operator=(RRef&& other) = delete; + + ~RRef() override = default; + + // returns the worker id of the owner + inline worker_id_t owner() const override { + return ownerId_; + } + + // returns the worker name of the owner + inline std::string ownerName() const override { + return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_; + } + + // returns the worker info of the owner + inline WorkerInfo ownerWorkerInfo() const { + return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_); + } + + // Returns the globally unique RRefId of this RRef + inline const RRefId& rrefId() const { + return rrefId_; + } + + inline bool isPyObj() const { + return type_ == PyObjectType::get(); + } + inline const TypePtr type() const override { + return type_; + } + + // Save the future corresponding to the creation of this RRef on a remote + // node. Note that this is only set when processing requests invoked with + // rpc.remote. This is only used to get the future corresponding to the rref + // for profiling use cases. + inline void registerOwnerCreationFuture(c10::intrusive_ptr fut) { + ownerCreationFuture_ = std::move(fut); + } + + // Get the future corresponding to the creation of this rref. + inline c10::intrusive_ptr getOwnerCreationFuture() const { + return ownerCreationFuture_; + } + + // Check if creation of this RRef on owner node has timed out. + inline bool getTimedOut() const { + return timedOut_.load(); + } + + // Dispatches an error to the correct handler based on its RPCErrorType. + void handleError(RPCErrorType errorType, const JitFuture& JitFuture); + + // Send delete UserRRef request to Owner, + // if the request hasn't been sent yet. + // There are 2 cases to call it, + // 1, Python GC decides end of UserRRef lifetime, calling destructor. + // 2, RPC module graceful shutdown calls it on all UserRRefs tracked + // in the RRefContext. 
+ virtual void tryDel() {} + + protected: + // Indicates that the creation of this RRef on owner node has timed out. + inline void setTimedOut() { + timedOut_ = true; + } + friend class RRefContext; + + RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type); + + virtual RRefForkData fork() const; + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const worker_id_t ownerId_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const RRefId rrefId_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::atomic timedOut_{false}; + + // type field to denote the type of the element that the RRef is holding + // it could be any TypePtr that JIT support, including PyObjectType + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const TypePtr type_; + // Future corresponding to request to create RRef on remote node. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + c10::intrusive_ptr ownerCreationFuture_; +}; + +// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user +// also has a globally unique ``ForkId`` to identify this user. ``UserRRef`` +// never owns the real value, the only way to get the value of the ``RRef`` is +// to call ``to_here()`` and get a copy.. +class TORCH_API UserRRef final : public RRef { + public: + UserRRef(const UserRRef& other) = delete; + UserRRef(UserRRef&& other) = delete; + UserRRef& operator=(const UserRRef& other) = delete; + UserRRef& operator=(UserRRef&& other) = delete; + + UserRRef( + worker_id_t ownerId, + const RRefId& rrefId, + const ForkId& forkId, + TypePtr type); + + inline bool isOwner() const override { + return false; + } + + inline bool confirmedByOwner() const override { + return confirmedByOwner_; + } + + // Returns the globally unique ForkId of this RRef + const ForkId& forkId() const; + + // Get of copy of the value from the ``OwnerRRef``. 
If the value is not ready + // yet, this call will block. + IValue toHere( + const float timeoutSeconds = + torch::distributed::rpc::kUnsetRpcTimeout) const; + + void tryDel() override; + + // Will be called when refcount reaches 0. + // Upon destruction, this ``UserRRef`` will tell the owner to deref. + void release_resources() override; + + // Will be called when both refcount and weakcount reach 0. See + // https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204 + // This is called on destructing the wrapping intrusive_ptr_target instance + // and it's data members. + ~UserRRef() override; + + private: + friend class RRefContext; + + RRefForkData fork() const override; + inline void confirm() { + confirmedByOwner_ = true; + } + + const ForkId forkId_; + + // Indicates if this user has sent delete message to it's owner. + // Note, thread safety is needed because delete message could be sent by + // either the destructor called by Python garbage collection or RRefContext + // proactive cleanup on RPC graceful shutdown. + std::mutex deletedOnOwnerMutex_; + bool deletedOnOwner_{false}; + // Indicating whether this UserRRef has been confirmed by its owner. + std::atomic confirmedByOwner_; +}; + +// Keep the template only on the derived class because ``RRefContext`` needs to +// erase the type on ``RRef`` and keep them in one map. 
+class TORCH_API OwnerRRef final : public RRef { + public: + OwnerRRef(const OwnerRRef& other) = delete; + OwnerRRef(OwnerRRef&& other) = delete; + OwnerRRef& operator=(const OwnerRRef& other) = delete; + OwnerRRef& operator=(OwnerRRef&& other) = delete; + + OwnerRRef( + worker_id_t ownerId, + const RRefId& rrefId, + TypePtr type, + std::vector devices); + + OwnerRRef( + worker_id_t ownerId, + const RRefId& rrefId, + TypePtr type, + std::optional value, + std::vector devices); + + inline bool isOwner() const override { + return true; + } + + // OwnerRRef is always confirmed, while UserRRef is only confirmed when the + // owner knows about it. + inline bool confirmedByOwner() const override { + return true; + } + + // Get a constant reference of the real value. This method will block if the + // value is not ready. This method does not need GIL as it does not create + // any new py::object. It will throw if there is an error. + const IValue& getValue() const; + + // Set the value of this ``OwnerRRef``. This method does not need GIL as it + // does not create any new py::object. + void setValue(IValue&& value); + // Sets the value of this ``OwnerRRef`` to contain an exception. + void setError(std::exception_ptr eptr); + + // Has a value or error been set? + bool hasValue() const; + // Gets a future that is satisfied when the value or error is set. 
+ c10::intrusive_ptr getFuture(); + + private: + friend class RRefContext; + + c10::intrusive_ptr future_; +}; + +TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref); + +// Helper function that casts from c10::RRefInterface to OwnerRRef +inline TORCH_API c10::intrusive_ptr fromRRefInterface( + const c10::intrusive_ptr& rrefInterface) { + return c10::static_intrusive_pointer_cast(rrefInterface); +} + +// Helper function that casts from OwnerRRef to c10::RRefInterface +inline TORCH_API c10::intrusive_ptr fromOwnerRRef( + const c10::intrusive_ptr& ownerRRef) { + return c10::static_intrusive_pointer_cast(ownerRRef); +} + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h new file mode 100644 index 0000000000000000000000000000000000000000..5db4adf95f85bad2b712293e0e85ec12f60af3ef --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +using torch::jit::Operator; + +// A ScriptCall instance represents an invocation of a builtin operator for a +// TorchScript function. If it is a builtin operator, it +// contains a shared ptr to the `Operator` and a list of arguments. +// If it is a TorchScript function, it contains a non empty qualifiedName string +// to the TorchScript function schema name and a list of arguments. +class TORCH_API ScriptCall : public RpcCommandBase { + public: + // Constructor for builitin operator call. + ScriptCall(std::shared_ptr op, std::vector&& stack); + // Constructor for TorchScript function call. 
+ ScriptCall( + const c10::QualifiedName& qualifiedName, + std::vector&& stack, + const bool isAsyncExecution = false); + + bool hasOp() const; + std::shared_ptr op() const; + bool hasQualifiedName() const; + const c10::QualifiedName& qualifiedName() const; + // return the argument stack of this builtin operator + const std::vector& stack() const; + std::vector& stackRef(); + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + ~ScriptCall() override = default; + + protected: + virtual void toIValues(std::vector& ivalues) const; + static std::unique_ptr fromIValues( + std::vector& ivalues); + + private: + // Given an operator symbol and a string schema, return the matched operator. + static std::shared_ptr matchOperator(const std::string& str_schema); + + static const std::string BUILTIN_OP_NAMESPACE_; + static const std::string ATEN_PREFIX_; + + // This field has value if this ScriptCall represents invocation of a builtin + // operator. + std::optional> op_; + // This field has non empty string if this ScriptCall represents invocation of + // an annotated torchscript function defined by users. 
+ std::optional qualifiedName_; + std::vector stack_; + const bool isAsyncExecution_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h new file mode 100644 index 0000000000000000000000000000000000000000..958b59bab5bbd1d8818964e5af9c42b3f4a16154 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Return value of a builtin operator or a TorchScript function. +class TORCH_API ScriptResp final : public RpcCommandBase { + public: + explicit ScriptResp(at::IValue&& values); + + const at::IValue& value(); + c10::intrusive_ptr toMessageImpl() && override; + static std::unique_ptr fromMessage(const Message& message); + + private: + const at::IValue value_; +}; + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5338c23108096708481b1a5a0659862e2db20881 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch::distributed::rpc { + +// This function sends an rpc call to run torchscript function, currently the +// torchscript function could only be a user defined python function with +// "@torch.jit.script" annotation. 
The torchscript function could not be +// a class constructor, class method, instance method or a script module. +// dst: destination worker name +// qualifiedName: torchscript function qualified name string like +// "moduleName::torchscriptFunctionName", e.g, +// "dist_autograd_test::my_py_add" +// stack: a bag of IValue args passed to torchscriptFunctionName +// It returns c10::intrusive_ptr +c10::intrusive_ptr TORCH_API rpcTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +c10::intrusive_ptr TORCH_API remoteTorchscript( + const std::string& dstWorkerName, + const c10::QualifiedName& qualifiedName, + const c10::FunctionSchema& functionSchema, + std::vector& stack, + const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout, + const bool isAsyncExecution = false); + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h new file mode 100644 index 0000000000000000000000000000000000000000..4babac93713f263e3aee4f96c0e16cb39584c072 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include + +namespace torch::distributed::rpc { + +using worker_id_t = int16_t; +using local_id_t = int64_t; + +bool getAllowJitRRefPickle(); +TORCH_API void enableJitRRefPickle(); +TORCH_API void disableJitRRefPickle(); + +struct TORCH_API JitRRefPickleGuard { + JitRRefPickleGuard(); + ~JitRRefPickleGuard(); +}; + +struct TORCH_API GloballyUniqueId final { + GloballyUniqueId(worker_id_t createdOn, local_id_t localId); + GloballyUniqueId(const GloballyUniqueId& other) = 
default; + GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete; + + bool operator==(const GloballyUniqueId& other) const; + bool operator!=(const GloballyUniqueId& other) const; + + at::IValue toIValue() const; + static GloballyUniqueId fromIValue(const at::IValue&); + + struct Hash { + size_t operator()(const GloballyUniqueId& key) const { + return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_; + } + }; + + static constexpr int kLocalIdBits = 48; + + const worker_id_t createdOn_; + const local_id_t localId_; +}; + +TORCH_API std::ostream& operator<<( + std::ostream& os, + const GloballyUniqueId& globalId); + +using RRefId = GloballyUniqueId; +using ForkId = GloballyUniqueId; +using ProfilingId = GloballyUniqueId; + +struct TORCH_API SerializedPyObj final { + SerializedPyObj(std::string&& payload, std::vector&& tensors) + : payload_(std::move(payload)), tensors_(std::move(tensors)) {} + + std::vector toIValues() &&; + static SerializedPyObj fromIValues(std::vector value); + + std::string payload_; + std::vector tensors_; +}; + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h new file mode 100644 index 0000000000000000000000000000000000000000..22d13d2e833cee8135b7634045f68377b10a2e86 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include + +namespace torch::distributed::rpc { + +// This class converts the content in a PythonCall into py::object. 
This is a +// helper class to make sure that all arguments deserialization is done before +// entering RequestCallbackImpl::processRpc(...), so that the deserialization +// related logic can be carried out in one spot instead of scattered in multiple +// places for different message types. +// NB: The reason for not consolidating class into PythonCall is because +// PythonCall is a libtorch type which should not depend on Python types. +class TORCH_API UnpickledPythonCall : public RpcCommandBase { + public: + UnpickledPythonCall( + const SerializedPyObj& serializedPyObj, + bool isAsyncExecution); + ~UnpickledPythonCall() override; + + // toMessage() method is not implemented, as objects of this class should + // never be directly converted into a Message object. + c10::intrusive_ptr toMessageImpl() && override; + const py::object& pythonUdf() const; + + inline bool isAsyncExecution() const { + return isAsyncExecution_; + } + + private: + py::object pythonUdf_; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const bool isAsyncExecution_; +}; + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h new file mode 100644 index 0000000000000000000000000000000000000000..99386ffaaf96e069b18a39b040abe06d19deea5c --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::distributed::rpc { + +// This class converts the content in a PythonRemoteCall into py::object. 
This +// is a helper class to make sure that all arguments deserialization is done +// before entering RequestCallbackImpl::processRpc(...), so that the +// deserialization related logic can be carried out in one spot instead of +// scattered in multiple places for different message types. +// NB: The reason for not consolidating class into PythonRemoteCall is because +// PythonRemoteCall is a libtorch type which should not depend on Python types. +class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall { + public: + explicit UnpickledPythonRemoteCall( + const SerializedPyObj& serializedPyObj, + const at::IValue& retRRefId, + const at::IValue& retForkId, + const bool isAsyncExecution); + + const RRefId& rrefId() const; + const ForkId& forkId() const; + + private: + RRefId rrefId_; + ForkId forkId_; +}; + +} // namespace torch::distributed::rpc diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3627d0db14f9c104600d9c1f6af51dd2833a2971 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace distributed { +namespace rpc { + +// Parse error message and return RPCErrorType based on the message. +TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture); +// Create an error string given the error description and error type +TORCH_API std::string makeRPCError( + const std::string& rpcErrorStr, + RPCErrorType errorType); + +// Given an RPC message received as a request over the wire, deserialize it into +// the appropriate 'RpcCommandBase' type. 
+TORCH_API std::unique_ptr deserializeRequest( + const Message& request); + +// Given an RPC message received as a response over the wire, deserialize it +// into the appropriate 'RpcCommandBase' type, if the response is +// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions +// to received tensors and set the wrappedMsgType to its wrapped message type. +TORCH_API std::unique_ptr deserializeResponse( + const Message& response, + MessageType& wrappedMsgType); + +// Given an RPC message received as a response over the wire, deserialize it +// into the valid IValue if the message is for a script rpc result, +// otherwise deserialize it into dummy none ivalue that will never be used. +// In this deserialization, we also attach recv rpc backward functions if +// needed. +IValue deserializeResptoIValueInternal( + RpcCommandBase& rpc, + MessageType messageType); +TORCH_API IValue deserializeRespToIValue(const Message& message); + +// Note: format is subject to change and intended for RPCs. +// For saving persistently to disk, use torch::save(). +TORCH_API std::string wireSerialize( + const std::vector& payload, + const std::vector& tensors); + +TORCH_API std::pair, std::vector> wireDeserialize( + const void* data, + size_t data_size); + +// We use vector as the type of blobs because it's what rpc::Message uses +// for its payload, even though it has the disadvantage that it cannot be +// allocated with uninitialized memory: it is always zeroed out. + +// Some Tensors are effectively views of larger Tensors, where only a small +// subset of the Storage data is referenced. This normally is good and avoids +// copies when kept locally, but if we naively push the whole Storage over the +// wire, we'll end up with excess network traffic. This change clones tensors if +// we'd save at least half the data, and over a minimum hurdle. 
+TORCH_API c10::List cloneSparseTensors( + const std::vector& tensors); + +// Combines an original payload and wrapped payload into the original payload. +// Used to generate the overall payload for the wrapped RPC. +TORCH_API void writeWrappedPayload( + std::vector& originalPayload, + std::vector& additionalPayload); + +// Reads the additional, wrapped payload from a wrapped RPC off of the input +// payload. After this, payload will contain the payload of the original, +// un-wrapped RPC. +TORCH_API std::vector readWrappedPayload( + std::vector& payload, + const rpc::Message& message); + +// Takes a list of events from autograd profiler and populates them into +// profiledEvents to be carried over RPC. +TORCH_API void populateRemoteProfiledEvents( + std::vector& profiledEvents, + const torch::autograd::profiler::ProfilerConfig& profilerConfig, + const std::vector>& + eventLists); + +} // namespace rpc +} // namespace distributed +} // namespace torch diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h new file mode 100644 index 0000000000000000000000000000000000000000..8c38e972faf71cb653dee89fced30d928c26b725 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include + +namespace torch { +TORCH_API bool get_cpp_stacktraces_enabled(); +TORCH_API torch::unwind::Mode get_symbolize_mode(); +} // namespace torch diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h new file mode 100644 index 0000000000000000000000000000000000000000..0e3c2f30a83e3d0e2ca4eb7fc0e3e0ff026560e5 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h @@ -0,0 +1,13 @@ 
+#pragma once + +namespace torch::utils { + +inline constexpr bool cuda_enabled() { +#ifdef USE_CUDA + return true; +#else + return false; +#endif +} + +} // namespace torch::utils diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..81ad207306844a4b8e0b57efe72a6079d4a74e26 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + +template +class TORCH_PYTHON_API THPPointer { + public: + THPPointer() : ptr(nullptr){}; + explicit THPPointer(T* ptr) noexcept : ptr(ptr){}; + THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {} + + ~THPPointer() { + free(); + }; + T* get() { + return ptr; + } + const T* get() const { + return ptr; + } + T* release() { + T* tmp = ptr; + ptr = nullptr; + return tmp; + } + operator T*() { + return ptr; + } + THPPointer& operator=(T* new_ptr) noexcept { + free(); + ptr = new_ptr; + return *this; + } + THPPointer& operator=(THPPointer&& p) noexcept { + free(); + ptr = p.ptr; + p.ptr = nullptr; + return *this; + } + T* operator->() { + return ptr; + } + explicit operator bool() const { + return ptr != nullptr; + } + + private: + void free(); + T* ptr = nullptr; +}; + +/** + * An RAII-style, owning pointer to a PyObject. You must protect + * destruction of this object with the GIL. + * + * WARNING: Think twice before putting this as a field in a C++ + * struct. This class does NOT take out the GIL on destruction, + * so if you will need to ensure that the destructor of your struct + * is either (a) always invoked when the GIL is taken or (b) takes + * out the GIL itself. Easiest way to avoid this problem is to + * not use THPPointer in this situation. 
+ */ +using THPObjectPtr = THPPointer; +using THPCodeObjectPtr = THPPointer; +using THPFrameObjectPtr = THPPointer; diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h new file mode 100644 index 0000000000000000000000000000000000000000..456095d7b7037d46bd59f8f173795e177ae269a6 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +// This file contains utilities used for handling PyObject preservation + +void clear_slots(PyTypeObject* type, PyObject* self); diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h new file mode 100644 index 0000000000000000000000000000000000000000..997425ac7de2ba9148fcbef5ee27b996262885fc --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h @@ -0,0 +1,161 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch::utils { + +template +inline T unpackIntegral(PyObject* obj, const char* type) { +#if PY_VERSION_HEX >= 0x030a00f0 + // In Python-3.10 floats can no longer be silently converted to integers + // Keep backward compatible behavior for now + if (PyFloat_Check(obj)) { + return c10::checked_convert(THPUtils_unpackDouble(obj), type); + } + return c10::checked_convert(THPUtils_unpackLong(obj), type); +#else + return static_cast(THPUtils_unpackLong(obj)); +#endif +} + +inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) { + switch (scalarType) { + case at::kByte: + *(uint8_t*)data = unpackIntegral(obj, "uint8"); + break; + case at::kUInt16: + *(uint16_t*)data = unpackIntegral(obj, "uint16"); + break; + case at::kUInt32: + 
*(uint32_t*)data = unpackIntegral(obj, "uint32"); + break; + case at::kUInt64: + // NB: This doesn't allow implicit conversion of float to int + *(uint64_t*)data = THPUtils_unpackUInt64(obj); + break; + case at::kChar: + *(int8_t*)data = unpackIntegral(obj, "int8"); + break; + case at::kShort: + *(int16_t*)data = unpackIntegral(obj, "int16"); + break; + case at::kInt: + *(int32_t*)data = unpackIntegral(obj, "int32"); + break; + case at::kLong: + *(int64_t*)data = unpackIntegral(obj, "int64"); + break; + case at::kHalf: + *(at::Half*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat: + *(float*)data = (float)THPUtils_unpackDouble(obj); + break; + case at::kDouble: + *(double*)data = THPUtils_unpackDouble(obj); + break; + case at::kComplexHalf: + *(c10::complex*)data = + (c10::complex)static_cast>( + THPUtils_unpackComplexDouble(obj)); + break; + case at::kComplexFloat: + *(c10::complex*)data = + (c10::complex)THPUtils_unpackComplexDouble(obj); + break; + case at::kComplexDouble: + *(c10::complex*)data = THPUtils_unpackComplexDouble(obj); + break; + case at::kBool: + *(bool*)data = THPUtils_unpackNumberAsBool(obj); + break; + case at::kBFloat16: + *(at::BFloat16*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2: + *(at::Float8_e5m2*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2fnuz: + *(at::Float8_e5m2fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e4m3fn: + *(at::Float8_e4m3fn*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e4m3fnuz: + *(at::Float8_e4m3fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + default: + throw std::runtime_error("invalid type"); + } +} + +inline PyObject* load_scalar(const void* data, at::ScalarType scalarType) { + switch (scalarType) { + case at::kByte: + return THPUtils_packInt64(*(uint8_t*)data); + case at::kUInt16: + return 
THPUtils_packInt64(*(uint16_t*)data); + case at::kUInt32: + return THPUtils_packUInt32(*(uint32_t*)data); + case at::kUInt64: + return THPUtils_packUInt64(*(uint64_t*)data); + case at::kChar: + return THPUtils_packInt64(*(int8_t*)data); + case at::kShort: + return THPUtils_packInt64(*(int16_t*)data); + case at::kInt: + return THPUtils_packInt64(*(int32_t*)data); + case at::kLong: + return THPUtils_packInt64(*(int64_t*)data); + case at::kHalf: + return PyFloat_FromDouble( + at::convert(*(at::Half*)data)); + case at::kFloat: + return PyFloat_FromDouble(*(float*)data); + case at::kDouble: + return PyFloat_FromDouble(*(double*)data); + case at::kComplexHalf: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexFloat: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexDouble: + return PyComplex_FromCComplex( + *reinterpret_cast((c10::complex*)data)); + case at::kBool: + return PyBool_FromLong(*(bool*)data); + case at::kBFloat16: + return PyFloat_FromDouble( + at::convert(*(at::BFloat16*)data)); + case at::kFloat8_e5m2: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e5m2*)data)); + case at::kFloat8_e4m3fn: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e4m3fn*)data)); + case at::kFloat8_e5m2fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e5m2fnuz*)data)); + case at::kFloat8_e4m3fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e4m3fnuz*)data)); + default: + throw std::runtime_error("invalid type"); + } +} + +} // namespace torch::utils diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h new file mode 100644 index 0000000000000000000000000000000000000000..088f8d1927c4732d8543ca82a39c08247257066a --- /dev/null +++ 
b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h @@ -0,0 +1,136 @@ +#pragma once + +#include +#include + +#include + +namespace torch::utils { + +// NOTE: [torch.tensor, lift_fresh, and device movement] +// +// The `only_lift_cpu_tensors` flag controls what happens on torch.tensor([1, 2, +// 3], device="cuda") (or any non-CPU devices). +// +// If false (default): +// - the data gets moved into a CPU Tensor +// - then, it gets moved to cuda (via .to) +// - finally, we call lift_fresh() on it. +// Steps 1 and 2 happen with all modes disabled. +// +// If true: +// - the data gets moved into a CPU Tensor (with correct dtype) +// - we call lift_fresh() on it +// - finally, we move it to cuda (via .to) +// Step 1 happens with all modes disabled. +// +// `only_lift_cpu_tensors=true` is useful to prevent CUDA initialization under +// FakeTensorMode because it avoids moving concrete data to CUDA. +TORCH_API bool only_lift_cpu_tensors(); +TORCH_API void set_only_lift_cpu_tensors(bool value); + +at::Tensor base_tensor_ctor(PyObject* args, PyObject* kwargs); +at::Tensor legacy_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor legacy_tensor_new( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor indexing_tensor_from_data( + c10::TensorOptions options, + at::ScalarType scalar_type, + std::optional device, + PyObject* data); +at::Tensor sparse_coo_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +void _validate_sparse_coo_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); + +at::Tensor sparse_compressed_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_csr_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + 
PythonArgs& r); +at::Tensor sparse_csc_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_bsr_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_bsc_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); + +void _validate_sparse_compressed_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_csr_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_csc_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_bsr_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_bsc_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); + +at::Tensor tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor as_tensor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor new_tensor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor new_ones( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor tensor_frombuffer( + PyObject* buffer, + at::ScalarType dtype, + int64_t count, + int64_t offset, + bool requires_grad); +at::Tensor tensor_fromDLPack(PyObject* data); +at::Tensor asarray( + PyObject* obj, + std::optional dtype, + std::optional device, + std::optional copy, + bool requires_grad); +} // namespace torch::utils diff --git a/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h 
b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h new file mode 100644 index 0000000000000000000000000000000000000000..44fe1028fe5c4eeaff2b73ce01771fea8e94a6f4 --- /dev/null +++ b/mplug_owl2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h @@ -0,0 +1,108 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { + +using at::IterArgs; + +struct CountTensors : IterArgs { + size_t out = 0; + void operator()(const at::Tensor& x) { + out += 1; + } + void operator()(const std::optional& x) { + out += x.has_value(); + } + void operator()(at::ArrayRef xs) { + out += xs.size(); + } +}; + +template +size_t count_tensors(Args&&... args) { + return CountTensors().apply(std::forward(args)...).out; +} + +struct CountVariables : IterArgs { + size_t out = 0; + void operator()(const autograd::Variable& x) { + out += 1; + } + void operator()(at::ArrayRef xs) { + out += xs.size(); + } +}; + +template +inline size_t count_variables(Args&&... args) { + return CountVariables().apply(std::forward(args)...).out; +} + +//===----------------------------------------------------------------------===// +// std::index_sequence shim for C++11 +//===----------------------------------------------------------------------===// + +// A container of type-template parameter indices. +template +struct Indices {}; + +// Decrements the index N, adds N-1 to the list of indices and forwards +// whatever we already have. +template +struct MakeIndices : MakeIndices {}; + +// Partial specialization that forms our base case. When N is zero, we stop +// and define a typedef that will be visible to earlier classes due to +// inheritance. The typedef we define is an index list containing the numbers +// 0 through N-1. 
+template +struct MakeIndices<0, Is...> { + using indices = Indices; +}; + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +template +void apply(Function function, Ts&&... ts) { + // https://stackoverflow.com/questions/13978916/inserting-a-variadic-argument-list-into-a-vector + // Creates a dummy array, so that each function call is evaluated in order. + // `(function(), 0)` is because `function` should (!) return `void`, so + // according to the comma operator, it is evaluated and its result (`void`) + // is discarded. Then the zero is evaluated and used as an element in the + // array. The first zero ensures the array is not empty. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + int _[]{0, (function(std::forward(ts)), 0)...}; + (void)_; +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor> +ReturnType unpack(Function function, Accessor accessor) { + return ReturnType(unpack( + std::move(function), + std::move(accessor), + typename MakeIndices::indices())); +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor, + size_t... Is> +ReturnType unpack(Function function, Accessor accessor, Indices) { + return ReturnType(function(accessor.template operator()(Is)...)); +} + +} // namespace torch diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/all.py b/openflamingo/lib/python3.10/site-packages/nltk/test/all.py new file mode 100644 index 0000000000000000000000000000000000000000..50284096e29af967c13925a87f45186a149ab09b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/all.py @@ -0,0 +1,26 @@ +"""Test suite that runs all NLTK tests. + +This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the +project's ``setup-eggs.py`` file. 
Here, we create a test suite that +runs all of our doctests, and return it for processing by the setuptools +test harness. + +""" + +import doctest +import os.path +import unittest +from glob import glob + + +def additional_tests(): + # print("here-000000000000000") + # print("-----", glob(os.path.join(os.path.dirname(__file__), '*.doctest'))) + dir = os.path.dirname(__file__) + paths = glob(os.path.join(dir, "*.doctest")) + files = [os.path.basename(path) for path in paths] + return unittest.TestSuite([doctest.DocFileSuite(file) for file in files]) + + +# if os.path.split(path)[-1] != 'index.rst' +# skips time-dependent doctest in index.rst diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/bnc.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/bnc.doctest new file mode 100644 index 0000000000000000000000000000000000000000..c0eaa2137b965db4fa545ba605e3614f62044bb0 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/bnc.doctest @@ -0,0 +1,60 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + + >>> import os.path + + >>> from nltk.corpus.reader import BNCCorpusReader + >>> import nltk.test + + >>> root = os.path.dirname(nltk.test.__file__) + >>> bnc = BNCCorpusReader(root=root, fileids='FX8.xml') + +Checking the word access. +------------------------- + + >>> len(bnc.words()) + 151 + + >>> bnc.words()[:6] + ['Ah', 'there', 'we', 'are', ',', '.'] + >>> bnc.words(stem=True)[:6] + ['ah', 'there', 'we', 'be', ',', '.'] + + >>> bnc.tagged_words()[:6] + [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')] + + >>> bnc.tagged_words(c5=True)[:6] + [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')] + +Testing access to the sentences. 
+-------------------------------- + + >>> len(bnc.sents()) + 15 + + >>> bnc.sents()[0] + ['Ah', 'there', 'we', 'are', ',', '.'] + >>> bnc.sents(stem=True)[0] + ['ah', 'there', 'we', 'be', ',', '.'] + + >>> bnc.tagged_sents()[0] + [('Ah', 'INTERJ'), ('there', 'ADV'), ('we', 'PRON'), ('are', 'VERB'), (',', 'PUN'), ('.', 'PUN')] + >>> bnc.tagged_sents(c5=True)[0] + [('Ah', 'ITJ'), ('there', 'AV0'), ('we', 'PNP'), ('are', 'VBB'), (',', 'PUN'), ('.', 'PUN')] + +A not lazy loader. +------------------ + + >>> eager = BNCCorpusReader(root=root, fileids=r'FX8.xml', lazy=False) + + >>> len(eager.words()) + 151 + >>> eager.words(stem=True)[6:17] + ['right', 'abdominal', 'wound', ',', 'she', 'be', 'a', 'wee', 'bit', 'confuse', '.'] + + >>> eager.tagged_words()[6:11] + [('Right', 'ADV'), ('abdominal', 'ADJ'), ('wound', 'SUBST'), (',', 'PUN'), ('she', 'PRON')] + >>> eager.tagged_words(c5=True)[6:17] + [('Right', 'AV0'), ('abdominal', 'AJ0'), ('wound', 'NN1'), (',', 'PUN'), ('she', 'PNP'), ("'s", 'VBZ'), ('a', 'AT0'), ('wee', 'AJ0-NN1'), ('bit', 'NN1'), ('confused', 'VVN-AJ0'), ('.', 'PUN')] + >>> len(eager.sents()) + 15 diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/childes_fixt.py b/openflamingo/lib/python3.10/site-packages/nltk/test/childes_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9a69e4dab9670fa7d19b24d24e5eb72322f359 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/childes_fixt.py @@ -0,0 +1,13 @@ +def setup_module(): + import pytest + + import nltk.data + + try: + nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/") + except LookupError as e: + pytest.skip( + "The CHILDES corpus is not found. 
" + "It should be manually downloaded and saved/unpacked " + "to [NLTK_Data_Dir]/corpora/childes/" + ) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/chunk.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/chunk.doctest new file mode 100644 index 0000000000000000000000000000000000000000..1e64186cfad92a280aae43fabe452c7e239903d9 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/chunk.doctest @@ -0,0 +1,372 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +========== + Chunking +========== + + >>> from nltk.chunk import * + >>> from nltk.chunk.util import * + >>> from nltk.chunk.regexp import * + >>> from nltk import Tree + + >>> tagged_text = "[ The/DT cat/NN ] sat/VBD on/IN [ the/DT mat/NN ] [ the/DT dog/NN ] chewed/VBD ./." + >>> gold_chunked_text = tagstr2tree(tagged_text) + >>> unchunked_text = gold_chunked_text.flatten() + +Chunking uses a special regexp syntax for rules that delimit the chunks. These +rules must be converted to 'regular' regular expressions before a sentence can +be chunked. + + >>> tag_pattern = "
?*" + >>> regexp_pattern = tag_pattern2re_pattern(tag_pattern) + >>> regexp_pattern + '(<(DT)>)?(<(JJ)>)*(<(NN[^\\{\\}<>]*)>)' + +Construct some new chunking rules. + + >>> chunk_rule = ChunkRule(r"<.*>+", "Chunk everything") + >>> strip_rule = StripRule(r"", "Strip on verbs/prepositions") + >>> split_rule = SplitRule("
", "
", + ... "Split successive determiner/noun pairs") + + +Create and score a series of chunk parsers, successively more complex. + + >>> chunk_parser = RegexpChunkParser([chunk_rule], chunk_label='NP') + >>> chunked_text = chunk_parser.parse(unchunked_text) + >>> print(chunked_text) + (S + (NP + The/DT + cat/NN + sat/VBD + on/IN + the/DT + mat/NN + the/DT + dog/NN + chewed/VBD + ./.)) + + >>> chunkscore = ChunkScore() + >>> chunkscore.score(gold_chunked_text, chunked_text) + >>> print(chunkscore.precision()) + 0.0 + + >>> print(chunkscore.recall()) + 0.0 + + >>> print(chunkscore.f_measure()) + 0 + + >>> for chunk in sorted(chunkscore.missed()): print(chunk) + (NP The/DT cat/NN) + (NP the/DT dog/NN) + (NP the/DT mat/NN) + + >>> for chunk in chunkscore.incorrect(): print(chunk) + (NP + The/DT + cat/NN + sat/VBD + on/IN + the/DT + mat/NN + the/DT + dog/NN + chewed/VBD + ./.) + + >>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule], + ... chunk_label='NP') + >>> chunked_text = chunk_parser.parse(unchunked_text) + >>> print(chunked_text) + (S + (NP The/DT cat/NN) + sat/VBD + on/IN + (NP the/DT mat/NN the/DT dog/NN) + chewed/VBD + ./.) + >>> assert chunked_text == chunk_parser.parse(list(unchunked_text)) + + >>> chunkscore = ChunkScore() + >>> chunkscore.score(gold_chunked_text, chunked_text) + >>> chunkscore.precision() + 0.5 + + >>> print(chunkscore.recall()) + 0.33333333... + + >>> print(chunkscore.f_measure()) + 0.4 + + >>> for chunk in sorted(chunkscore.missed()): print(chunk) + (NP the/DT dog/NN) + (NP the/DT mat/NN) + + >>> for chunk in chunkscore.incorrect(): print(chunk) + (NP the/DT mat/NN the/DT dog/NN) + + >>> chunk_parser = RegexpChunkParser([chunk_rule, strip_rule, split_rule], + ... chunk_label='NP') + >>> chunked_text = chunk_parser.parse(unchunked_text, trace=True) + # Input: +
<.> + # Chunk everything: + {
<.>} + # Strip on verbs/prepositions: + {
} {
} <.> + # Split successive determiner/noun pairs: + {
} {
}{
} <.> + >>> print(chunked_text) + (S + (NP The/DT cat/NN) + sat/VBD + on/IN + (NP the/DT mat/NN) + (NP the/DT dog/NN) + chewed/VBD + ./.) + + >>> chunkscore = ChunkScore() + >>> chunkscore.score(gold_chunked_text, chunked_text) + >>> chunkscore.precision() + 1.0 + + >>> chunkscore.recall() + 1.0 + + >>> chunkscore.f_measure() + 1.0 + + >>> chunkscore.missed() + [] + + >>> chunkscore.incorrect() + [] + + >>> chunk_parser.rules() + [+'>, '>, + ', '
'>] + +Printing parsers: + + >>> print(repr(chunk_parser)) + + >>> print(chunk_parser) + RegexpChunkParser with 3 rules: + Chunk everything + +'> + Strip on verbs/prepositions + '> + Split successive determiner/noun pairs + ', '
'> + +Regression Tests +~~~~~~~~~~~~~~~~ +ChunkParserI +------------ +`ChunkParserI` is an abstract interface -- it is not meant to be +instantiated directly. + + >>> ChunkParserI().parse([]) + Traceback (most recent call last): + . . . + NotImplementedError + + +ChunkString +----------- +ChunkString can be built from a tree of tagged tuples, a tree of +trees, or a mixed list of both: + + >>> t1 = Tree('S', [('w%d' % i, 't%d' % i) for i in range(10)]) + >>> t2 = Tree('S', [Tree('t0', []), Tree('t1', ['c1'])]) + >>> t3 = Tree('S', [('w0', 't0'), Tree('t1', ['c1'])]) + >>> ChunkString(t1) + '> + >>> ChunkString(t2) + '> + >>> ChunkString(t3) + '> + +Other values generate an error: + + >>> ChunkString(Tree('S', ['x'])) + Traceback (most recent call last): + . . . + ValueError: chunk structures must contain tagged tokens or trees + +The `str()` for a chunk string adds spaces to it, which makes it line +up with `str()` output for other chunk strings over the same +underlying input. + + >>> cs = ChunkString(t1) + >>> print(cs) + + >>> cs.xform('', '{}') + >>> print(cs) + {} + +The `_verify()` method makes sure that our transforms don't corrupt +the chunk string. By setting debug_level=2, `_verify()` will be +called at the end of every call to `xform`. + + >>> cs = ChunkString(t1, debug_level=3) + + >>> # tag not marked with <...>: + >>> cs.xform('', 't3') + Traceback (most recent call last): + . . . + ValueError: Transformation generated invalid chunkstring: + t3 + + >>> # brackets not balanced: + >>> cs.xform('', '{') + Traceback (most recent call last): + . . . + ValueError: Transformation generated invalid chunkstring: + { + + >>> # nested brackets: + >>> cs.xform('', '{{}}') + Traceback (most recent call last): + . . . + ValueError: Transformation generated invalid chunkstring: + {{}} + + >>> # modified tags: + >>> cs.xform('', '') + Traceback (most recent call last): + . . . 
+ ValueError: Transformation generated invalid chunkstring: tag changed + + >>> # added tags: + >>> cs.xform('', '') + Traceback (most recent call last): + . . . + ValueError: Transformation generated invalid chunkstring: tag changed + +Chunking Rules +-------------- + +Test the different rule constructors & __repr__ methods: + + >>> r1 = RegexpChunkRule(''+ChunkString.IN_STRIP_PATTERN, + ... '{}', 'chunk and ') + >>> r2 = RegexpChunkRule(re.compile(''+ChunkString.IN_STRIP_PATTERN), + ... '{}', 'chunk and ') + >>> r3 = ChunkRule('', 'chunk and ') + >>> r4 = StripRule('', 'strip and ') + >>> r5 = UnChunkRule('', 'unchunk and ') + >>> r6 = MergeRule('', '', 'merge w/ ') + >>> r7 = SplitRule('', '', 'split from ') + >>> r8 = ExpandLeftRule('', '', 'expand left ') + >>> r9 = ExpandRightRule('', '', 'expand right ') + >>> for rule in r1, r2, r3, r4, r5, r6, r7, r8, r9: + ... print(rule) + (?=[^\\}]*(\\{|$))'->'{}'> + (?=[^\\}]*(\\{|$))'->'{}'> + '> + '> + '> + ', ''> + ', ''> + ', ''> + ', ''> + +`tag_pattern2re_pattern()` complains if the tag pattern looks problematic: + + >>> tag_pattern2re_pattern('{}') + Traceback (most recent call last): + . . . + ValueError: Bad tag pattern: '{}' + +RegexpChunkParser +----------------- + +A warning is printed when parsing an empty sentence: + + >>> parser = RegexpChunkParser([ChunkRule('', '')]) + >>> parser.parse(Tree('S', [])) + Warning: parsing empty text + Tree('S', []) + +RegexpParser +------------ + + >>> parser = RegexpParser(''' + ... NP: {
? * *} # NP + ... P: {} # Preposition + ... V: {} # Verb + ... PP: {

} # PP -> P NP + ... VP: { *} # VP -> V (NP|PP)* + ... ''') + >>> print(repr(parser)) + + >>> print(parser) + chunk.RegexpParser with 5 stages: + RegexpChunkParser with 1 rules: + NP ? * *'> + RegexpChunkParser with 1 rules: + Preposition '> + RegexpChunkParser with 1 rules: + Verb '> + RegexpChunkParser with 1 rules: + PP -> P NP '> + RegexpChunkParser with 1 rules: + VP -> V (NP|PP)* *'> + >>> print(parser.parse(unchunked_text, trace=True)) + # Input: +

<.> + # NP: + {
} {
}{
} <.> + # Input: + <.> + # Preposition: + {} <.> + # Input: +

<.> + # Verb: + {}

{} <.> + # Input: +

<.> + # PP -> P NP: + {

} <.> + # Input: + <.> + # VP -> V (NP|PP)*: + { }{} <.> + (S + (NP The/DT cat/NN) + (VP + (V sat/VBD) + (PP (P on/IN) (NP the/DT mat/NN)) + (NP the/DT dog/NN)) + (VP (V chewed/VBD)) + ./.) + +Test parsing of other rule types: + + >>> print(RegexpParser(''' + ... X: + ... }{ # strip rule + ... }{ # split rule + ... {} # merge rule + ... {} # chunk rule w/ context + ... ''')) + chunk.RegexpParser with 1 stages: + RegexpChunkParser with 4 rules: + strip rule '> + split rule ', ''> + merge rule ', ''> + chunk rule w/ context ', '', ''> + +Illegal patterns give an error message: + + >>> print(RegexpParser('X: {} {}')) + Traceback (most recent call last): + . . . + ValueError: Illegal chunk pattern: {} {} diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/classify.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/classify.doctest new file mode 100644 index 0000000000000000000000000000000000000000..1c10597fd5758423e1a1de6ed11e96b63ace4015 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/classify.doctest @@ -0,0 +1,202 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +============= + Classifiers +============= + + >>> from nltk.test.classify_fixt import setup_module + >>> setup_module() + +Classifiers label tokens with category labels (or *class labels*). +Typically, labels are represented with strings (such as ``"health"`` +or ``"sports"``. In NLTK, classifiers are defined using classes that +implement the `ClassifierI` interface, which supports the following operations: + +- self.classify(featureset) +- self.classify_many(featuresets) +- self.labels() +- self.prob_classify(featureset) +- self.prob_classify_many(featuresets) + +NLTK defines several classifier classes: + +- `ConditionalExponentialClassifier` +- `DecisionTreeClassifier` +- `MaxentClassifier` +- `NaiveBayesClassifier` +- `WekaClassifier` + +Classifiers are typically created by training them on a training +corpus. 
+ + +Regression Tests +~~~~~~~~~~~~~~~~ + +We define a very simple training corpus with 3 binary features: ['a', +'b', 'c'], and are two labels: ['x', 'y']. We use a simple feature set so +that the correct answers can be calculated analytically (although we +haven't done this yet for all tests). + + >>> import nltk + >>> train = [ + ... (dict(a=1,b=1,c=1), 'y'), + ... (dict(a=1,b=1,c=1), 'x'), + ... (dict(a=1,b=1,c=0), 'y'), + ... (dict(a=0,b=1,c=1), 'x'), + ... (dict(a=0,b=1,c=1), 'y'), + ... (dict(a=0,b=0,c=1), 'y'), + ... (dict(a=0,b=1,c=0), 'x'), + ... (dict(a=0,b=0,c=0), 'x'), + ... (dict(a=0,b=1,c=1), 'y'), + ... (dict(a=None,b=1,c=0), 'x'), + ... ] + >>> test = [ + ... (dict(a=1,b=0,c=1)), # unseen + ... (dict(a=1,b=0,c=0)), # unseen + ... (dict(a=0,b=1,c=1)), # seen 3 times, labels=y,y,x + ... (dict(a=0,b=1,c=0)), # seen 1 time, label=x + ... ] + +Test the Naive Bayes classifier: + + >>> classifier = nltk.classify.NaiveBayesClassifier.train(train) + >>> sorted(classifier.labels()) + ['x', 'y'] + >>> classifier.classify_many(test) + ['y', 'x', 'y', 'x'] + >>> for pdist in classifier.prob_classify_many(test): + ... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y'))) + 0.2500 0.7500 + 0.5833 0.4167 + 0.3571 0.6429 + 0.7000 0.3000 + >>> classifier.show_most_informative_features() + Most Informative Features + c = 0 x : y = 2.3 : 1.0 + c = 1 y : x = 1.8 : 1.0 + a = 1 y : x = 1.7 : 1.0 + a = 0 x : y = 1.0 : 1.0 + b = 0 x : y = 1.0 : 1.0 + b = 1 x : y = 1.0 : 1.0 + +Test the Decision Tree classifier (without None): + + >>> classifier = nltk.classify.DecisionTreeClassifier.train( + ... train[:-1], entropy_cutoff=0, + ... support_cutoff=0) + >>> sorted(classifier.labels()) + ['x', 'y'] + >>> print(classifier) + c=0? .................................................. x + a=0? ................................................ x + a=1? ................................................ y + c=1? .................................................. 
y + + >>> classifier.classify_many(test) + ['y', 'y', 'y', 'x'] + >>> for pdist in classifier.prob_classify_many(test): + ... print('%.4f %.4f' % (pdist.prob('x'), pdist.prob('y'))) + Traceback (most recent call last): + . . . + NotImplementedError + + +Test the Decision Tree classifier (with None): + + >>> classifier = nltk.classify.DecisionTreeClassifier.train( + ... train, entropy_cutoff=0, + ... support_cutoff=0) + >>> sorted(classifier.labels()) + ['x', 'y'] + >>> print(classifier) + c=0? .................................................. x + a=0? ................................................ x + a=1? ................................................ y + a=None? ............................................. x + c=1? .................................................. y + + + +Test SklearnClassifier, which requires the scikit-learn package. + + >>> from nltk.classify import SklearnClassifier + >>> from sklearn.naive_bayes import BernoulliNB + >>> from sklearn.svm import SVC + >>> train_data = [({"a": 4, "b": 1, "c": 0}, "ham"), + ... ({"a": 5, "b": 2, "c": 1}, "ham"), + ... ({"a": 0, "b": 3, "c": 4}, "spam"), + ... ({"a": 5, "b": 1, "c": 1}, "ham"), + ... ({"a": 1, "b": 4, "c": 3}, "spam")] + >>> classif = SklearnClassifier(BernoulliNB()).train(train_data) + >>> test_data = [{"a": 3, "b": 2, "c": 1}, + ... {"a": 0, "b": 3, "c": 7}] + >>> classif.classify_many(test_data) + ['ham', 'spam'] + >>> classif = SklearnClassifier(SVC(), sparse=False).train(train_data) + >>> classif.classify_many(test_data) + ['ham', 'spam'] + +Test the Maximum Entropy classifier training algorithms; they should all +generate the same results. + + >>> def print_maxent_test_header(): + ... print(' '*11+''.join([' test[%s] ' % i + ... for i in range(len(test))])) + ... print(' '*11+' p(x) p(y)'*len(test)) + ... print('-'*(11+15*len(test))) + + >>> def test_maxent(algorithm): + ... print('%11s' % algorithm, end=' ') + ... try: + ... classifier = nltk.classify.MaxentClassifier.train( + ... 
train, algorithm, trace=0, max_iter=1000) + ... except Exception as e: + ... print('Error: %r' % e) + ... return + ... + ... for featureset in test: + ... pdist = classifier.prob_classify(featureset) + ... print('%8.2f%6.2f' % (pdist.prob('x'), pdist.prob('y')), end=' ') + ... print() + + >>> print_maxent_test_header(); test_maxent('GIS'); test_maxent('IIS') + test[0] test[1] test[2] test[3] + p(x) p(y) p(x) p(y) p(x) p(y) p(x) p(y) + ----------------------------------------------------------------------- + GIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24 + IIS 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24 + + >>> test_maxent('MEGAM'); test_maxent('TADM') # doctest: +SKIP + MEGAM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24 + TADM 0.16 0.84 0.46 0.54 0.41 0.59 0.76 0.24 + + + +Regression tests for TypedMaxentFeatureEncoding +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + >>> from nltk.classify import maxent + >>> train = [ + ... ({'a': 1, 'b': 1, 'c': 1}, 'y'), + ... ({'a': 5, 'b': 5, 'c': 5}, 'x'), + ... ({'a': 0.9, 'b': 0.9, 'c': 0.9}, 'y'), + ... ({'a': 5.5, 'b': 5.4, 'c': 5.3}, 'x'), + ... ({'a': 0.8, 'b': 1.2, 'c': 1}, 'y'), + ... ({'a': 5.1, 'b': 4.9, 'c': 5.2}, 'x') + ... ] + + >>> test = [ + ... {'a': 1, 'b': 0.8, 'c': 1.2}, + ... {'a': 5.2, 'b': 5.1, 'c': 5} + ... ] + + >>> encoding = maxent.TypedMaxentFeatureEncoding.train( + ... train, count_cutoff=3, alwayson_features=True) + + >>> classifier = maxent.MaxentClassifier.train( + ... 
train, bernoulli=False, encoding=encoding, trace=0) + + >>> classifier.classify_many(test) + ['y', 'x'] diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/conftest.py b/openflamingo/lib/python3.10/site-packages/nltk/test/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..b6c70033b45760058278e1dfc45076847ae6d2c8 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/conftest.py @@ -0,0 +1,33 @@ +import pytest + +from nltk.corpus.reader import CorpusReader + + +@pytest.fixture(autouse=True) +def mock_plot(mocker): + """Disable matplotlib plotting in test code""" + + try: + import matplotlib.pyplot as plt + + mocker.patch.object(plt, "gca") + mocker.patch.object(plt, "show") + except ImportError: + pass + + +@pytest.fixture(scope="module", autouse=True) +def teardown_loaded_corpora(): + """ + After each test session ends (either doctest or unit test), + unload any loaded corpora + """ + + yield # first, wait for the test to end + + import nltk.corpus + + for name in dir(nltk.corpus): + obj = getattr(nltk.corpus, name, None) + if isinstance(obj, CorpusReader) and hasattr(obj, "_unload"): + obj._unload() diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/corpus.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/corpus.doctest new file mode 100644 index 0000000000000000000000000000000000000000..4355950a78b65f6a60a6b54a6677efb8ef9db634 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/corpus.doctest @@ -0,0 +1,2336 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +================ + Corpus Readers +================ + +The `nltk.corpus` package defines a collection of *corpus reader* +classes, which can be used to access the contents of a diverse set of +corpora. The list of available corpora is given at: + +https://www.nltk.org/nltk_data/ + +Each corpus reader class is specialized to handle a specific +corpus format. 
In addition, the `nltk.corpus` package automatically +creates a set of corpus reader instances that can be used to access +the corpora in the NLTK data package. +Section `Corpus Reader Objects`_ ("Corpus Reader Objects") describes +the corpus reader instances that can be used to read the corpora in +the NLTK data package. Section `Corpus Reader Classes`_ ("Corpus +Reader Classes") describes the corpus reader classes themselves, and +discusses the issues involved in creating new corpus reader objects +and new corpus reader classes. Section `Regression Tests`_ +("Regression Tests") contains regression tests for the corpus readers +and associated functions and classes. + +.. contents:: **Table of Contents** + :depth: 4 + :backlinks: none + +--------------------- +Corpus Reader Objects +--------------------- + +Overview +======== + +NLTK includes a diverse set of corpora which can be +read using the ``nltk.corpus`` package. Each corpus is accessed by +means of a "corpus reader" object from ``nltk.corpus``: + + >>> import nltk.corpus + >>> # The Brown corpus: + >>> print(str(nltk.corpus.brown).replace('\\\\','/')) + + >>> # The Penn Treebank Corpus: + >>> print(str(nltk.corpus.treebank).replace('\\\\','/')) + + >>> # The Name Genders Corpus: + >>> print(str(nltk.corpus.names).replace('\\\\','/')) + + >>> # The Inaugural Address Corpus: + >>> print(str(nltk.corpus.inaugural).replace('\\\\','/')) + + +Most corpora consist of a set of files, each containing a document (or +other pieces of text). A list of identifiers for these files is +accessed via the ``fileids()`` method of the corpus reader: + + >>> nltk.corpus.treebank.fileids() + ['wsj_0001.mrg', 'wsj_0002.mrg', 'wsj_0003.mrg', 'wsj_0004.mrg', ...] + >>> nltk.corpus.inaugural.fileids() + ['1789-Washington.txt', '1793-Washington.txt', '1797-Adams.txt', ...] + +Each corpus reader provides a variety of methods to read data from the +corpus, depending on the format of the corpus. 
For example, plaintext +corpora support methods to read the corpus as raw text, a list of +words, a list of sentences, or a list of paragraphs. + + >>> from nltk.corpus import inaugural + >>> inaugural.raw('1789-Washington.txt') + 'Fellow-Citizens of the Senate ...' + >>> inaugural.words('1789-Washington.txt') + ['Fellow', '-', 'Citizens', 'of', 'the', ...] + >>> inaugural.sents('1789-Washington.txt') + [['Fellow', '-', 'Citizens'...], ['Among', 'the', 'vicissitudes'...]...] + >>> inaugural.paras('1789-Washington.txt') + [[['Fellow', '-', 'Citizens'...]], + [['Among', 'the', 'vicissitudes'...], + ['On', 'the', 'one', 'hand', ',', 'I'...]...]...] + +Each of these reader methods may be given a single document's item +name or a list of document item names. When given a list of document +item names, the reader methods will concatenate together the contents +of the individual documents. + + >>> l1 = len(inaugural.words('1789-Washington.txt')) + >>> l2 = len(inaugural.words('1793-Washington.txt')) + >>> l3 = len(inaugural.words(['1789-Washington.txt', '1793-Washington.txt'])) + >>> print('%s+%s == %s' % (l1, l2, l3)) + 1538+147 == 1685 + +If the reader methods are called without any arguments, they will +typically load all documents in the corpus. + + >>> len(inaugural.words()) + 152901 + +If a corpus contains a README file, it can be accessed with a ``readme()`` method: + + >>> inaugural.readme()[:32] + 'C-Span Inaugural Address Corpus\n' + +Plaintext Corpora +================= + +Here are the first few words from each of NLTK's plaintext corpora: + + >>> nltk.corpus.abc.words() + ['PM', 'denies', 'knowledge', 'of', 'AWB', ...] + >>> nltk.corpus.genesis.words() + ['In', 'the', 'beginning', 'God', 'created', ...] + >>> nltk.corpus.gutenberg.words(fileids='austen-emma.txt') + ['[', 'Emma', 'by', 'Jane', 'Austen', '1816', ...] + >>> nltk.corpus.inaugural.words() + ['Fellow', '-', 'Citizens', 'of', 'the', ...] 
+ >>> nltk.corpus.state_union.words() + ['PRESIDENT', 'HARRY', 'S', '.', 'TRUMAN', "'", ...] + >>> nltk.corpus.webtext.words() + ['Cookie', 'Manager', ':', '"', 'Don', "'", 't', ...] + +Tagged Corpora +============== + +In addition to the plaintext corpora, NLTK's data package also +contains a wide variety of annotated corpora. For example, the Brown +Corpus is annotated with part-of-speech tags, and defines additional +methods ``tagged_*()`` which return words as `(word,tag)` tuples, rather +than just bare word strings. + + >>> from nltk.corpus import brown + >>> print(brown.words()) + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> print(brown.tagged_words()) + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> print(brown.sents()) + [['The', 'Fulton', 'County'...], ['The', 'jury', 'further'...], ...] + >>> print(brown.tagged_sents()) + [[('The', 'AT'), ('Fulton', 'NP-TL')...], + [('The', 'AT'), ('jury', 'NN'), ('further', 'RBR')...]...] + >>> print(brown.paras(categories='reviews')) + [[['It', 'is', 'not', 'news', 'that', 'Nathan', 'Milstein'...], + ['Certainly', 'not', 'in', 'Orchestra', 'Hall', 'where'...]], + [['There', 'was', 'about', 'that', 'song', 'something', ...], + ['Not', 'the', 'noblest', 'performance', 'we', 'have', ...], ...], ...] + >>> print(brown.tagged_paras(categories='reviews')) + [[[('It', 'PPS'), ('is', 'BEZ'), ('not', '*'), ...], + [('Certainly', 'RB'), ('not', '*'), ('in', 'IN'), ...]], + [[('There', 'EX'), ('was', 'BEDZ'), ('about', 'IN'), ...], + [('Not', '*'), ('the', 'AT'), ('noblest', 'JJT'), ...], ...], ...] + +Similarly, the Indian Language POS-Tagged Corpus includes samples of +Indian text annotated with part-of-speech tags: + + >>> from nltk.corpus import indian + >>> print(indian.words()) # doctest: +SKIP + ['\xe0\xa6\xae\xe0\xa6\xb9\xe0\xa6\xbf\...', + '\xe0\xa6\xb8\xe0\xa6\xa8\xe0\xa7\x8d\xe0...', ...] 
+ >>> print(indian.tagged_words()) # doctest: +SKIP + [('\xe0\xa6\xae\xe0\xa6\xb9\xe0\xa6\xbf...', 'NN'), + ('\xe0\xa6\xb8\xe0\xa6\xa8\xe0\xa7\x8d\xe0...', 'NN'), ...] + +Several tagged corpora support access to a simplified, universal tagset, e.g. where all noun +tags are collapsed to a single category ``NOUN``: + + >>> print(brown.tagged_sents(tagset='universal')) + [[('The', 'DET'), ('Fulton', 'NOUN'), ('County', 'NOUN'), ('Grand', 'ADJ'), ('Jury', 'NOUN'), ...], + [('The', 'DET'), ('jury', 'NOUN'), ('further', 'ADV'), ('said', 'VERB'), ('in', 'ADP'), ...]...] + >>> from nltk.corpus import conll2000, switchboard + >>> print(conll2000.tagged_words(tagset='universal')) + [('Confidence', 'NOUN'), ('in', 'ADP'), ...] + +Use ``nltk.app.pos_concordance()`` to access a GUI for searching tagged corpora. + +Chunked Corpora +=============== + +The CoNLL corpora also provide chunk structures, which are encoded as +flat trees. The CoNLL 2000 Corpus includes phrasal chunks; and the +CoNLL 2002 Corpus includes named entity chunks. + + >>> from nltk.corpus import conll2000, conll2002 + >>> print(conll2000.sents()) + [['Confidence', 'in', 'the', 'pound', 'is', 'widely', ...], + ['Chancellor', 'of', 'the', 'Exchequer', ...], ...] + >>> for tree in conll2000.chunked_sents()[:2]: + ... print(tree) + (S + (NP Confidence/NN) + (PP in/IN) + (NP the/DT pound/NN) + (VP is/VBZ widely/RB expected/VBN to/TO take/VB) + (NP another/DT sharp/JJ dive/NN) + if/IN + ...) + (S + Chancellor/NNP + (PP of/IN) + (NP the/DT Exchequer/NNP) + ...) + >>> print(conll2002.sents()) + [['Sao', 'Paulo', '(', 'Brasil', ')', ',', ...], ['-'], ...] + >>> for tree in conll2002.chunked_sents()[:2]: + ... print(tree) + (S + (LOC Sao/NC Paulo/VMI) + (/Fpa + (LOC Brasil/NC) + )/Fpt + ...) + (S -/Fg) + +.. note:: Since the CONLL corpora do not contain paragraph break + information, these readers do not support the ``paras()`` method. + +.. 
warning:: if you call the conll corpora reader methods without any + arguments, they will return the contents of the entire corpus, + *including* the 'test' portions of the corpus.) + +SemCor is a subset of the Brown corpus tagged with WordNet senses and +named entities. Both kinds of lexical items include multiword units, +which are encoded as chunks (senses and part-of-speech tags pertain +to the entire chunk). + + >>> from nltk.corpus import semcor + >>> semcor.words() + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> semcor.chunks() + [['The'], ['Fulton', 'County', 'Grand', 'Jury'], ...] + >>> semcor.sents() + [['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', ...], + ['The', 'jury', 'further', 'said', ...], ...] + >>> semcor.chunk_sents() + [[['The'], ['Fulton', 'County', 'Grand', 'Jury'], ['said'], ... + ['.']], [['The'], ['jury'], ['further'], ['said'], ... ['.']], ...] + >>> list(map(str, semcor.tagged_chunks(tag='both')[:3])) + ['(DT The)', "(Lemma('group.n.01.group') (NE (NNP Fulton County Grand Jury)))", "(Lemma('state.v.01.say') (VB said))"] + >>> [[str(c) for c in s] for s in semcor.tagged_sents(tag='both')[:2]] + [['(DT The)', "(Lemma('group.n.01.group') (NE (NNP Fulton County Grand Jury)))", ... + '(None .)'], ['(DT The)', ... '(None .)']] + + +The IEER corpus is another chunked corpus. This corpus is unusual in +that each corpus item contains multiple documents. (This reflects the +fact that each corpus file contains multiple documents.) 
The IEER +corpus defines the `parsed_docs` method, which returns the documents +in a given item as `IEERDocument` objects: + + >>> from nltk.corpus import ieer + >>> ieer.fileids() + ['APW_19980314', 'APW_19980424', 'APW_19980429', + 'NYT_19980315', 'NYT_19980403', 'NYT_19980407'] + >>> docs = ieer.parsed_docs('APW_19980314') + >>> print(docs[0]) + + >>> print(docs[0].docno) + APW19980314.0391 + >>> print(docs[0].doctype) + NEWS STORY + >>> print(docs[0].date_time) + 03/14/1998 10:36:00 + >>> print(docs[0].headline) + (DOCUMENT Kenyans protest tax hikes) + >>> print(docs[0].text) + (DOCUMENT + (LOCATION NAIROBI) + , + (LOCATION Kenya) + ( + (ORGANIZATION AP) + ) + _ + (CARDINAL Thousands) + of + laborers, + ... + on + (DATE Saturday) + ...) + +Parsed Corpora +============== + +The Treebank corpora provide a syntactic parse for each sentence. The +NLTK data package includes a 10% sample of the Penn Treebank (in +``treebank``), as well as the Sinica Treebank (in ``sinica_treebank``). + +Reading the Penn Treebank (Wall Street Journal sample): + + >>> from nltk.corpus import treebank + >>> print(treebank.fileids()) + ['wsj_0001.mrg', 'wsj_0002.mrg', 'wsj_0003.mrg', 'wsj_0004.mrg', ...] + >>> print(treebank.words('wsj_0003.mrg')) + ['A', 'form', 'of', 'asbestos', 'once', 'used', ...] + >>> print(treebank.tagged_words('wsj_0003.mrg')) + [('A', 'DT'), ('form', 'NN'), ('of', 'IN'), ...] + >>> print(treebank.parsed_sents('wsj_0003.mrg')[0]) + (S + (S-TPC-1 + (NP-SBJ + (NP (NP (DT A) (NN form)) (PP (IN of) (NP (NN asbestos)))) + (RRC ...)...)...) + ... + (VP (VBD reported) (SBAR (-NONE- 0) (S (-NONE- *T*-1)))) + (. .)) + +If you have access to a full installation of the Penn Treebank, NLTK +can be configured to load it as well. Download the ``ptb`` package, +and in the directory ``nltk_data/corpora/ptb`` place the ``BROWN`` +and ``WSJ`` directories of the Treebank installation (symlinks work +as well). 
Then use the ``ptb`` module instead of ``treebank``: + + >>> from nltk.corpus import ptb + >>> print(ptb.fileids()) # doctest: +SKIP + ['BROWN/CF/CF01.MRG', 'BROWN/CF/CF02.MRG', 'BROWN/CF/CF03.MRG', 'BROWN/CF/CF04.MRG', ...] + >>> print(ptb.words('WSJ/00/WSJ_0003.MRG')) # doctest: +SKIP + ['A', 'form', 'of', 'asbestos', 'once', 'used', '*', ...] + >>> print(ptb.tagged_words('WSJ/00/WSJ_0003.MRG')) # doctest: +SKIP + [('A', 'DT'), ('form', 'NN'), ('of', 'IN'), ...] + +...and so forth, like ``treebank`` but with extended fileids. Categories +specified in ``allcats.txt`` can be used to filter by genre; they consist +of ``news`` (for WSJ articles) and names of the Brown subcategories +(``fiction``, ``humor``, ``romance``, etc.): + + >>> ptb.categories() # doctest: +SKIP + ['adventure', 'belles_lettres', 'fiction', 'humor', 'lore', 'mystery', 'news', 'romance', 'science_fiction'] + >>> print(ptb.fileids('news')) # doctest: +SKIP + ['WSJ/00/WSJ_0001.MRG', 'WSJ/00/WSJ_0002.MRG', 'WSJ/00/WSJ_0003.MRG', ...] + >>> print(ptb.words(categories=['humor','fiction'])) # doctest: +SKIP + ['Thirty-three', 'Scotty', 'did', 'not', 'go', 'back', ...] + +As PropBank and NomBank depend on the (WSJ portion of the) Penn Treebank, +the modules ``propbank_ptb`` and ``nombank_ptb`` are provided for access +to a full PTB installation. + +Reading the Sinica Treebank: + + >>> from nltk.corpus import sinica_treebank + >>> print(sinica_treebank.sents()) # doctest: +SKIP + [['\xe4\xb8\x80'], ['\xe5\x8f\x8b\xe6\x83\x85'], ...] 
+ >>> sinica_treebank.parsed_sents()[25] # doctest: +SKIP + Tree('S', + [Tree('NP', + [Tree('Nba', ['\xe5\x98\x89\xe7\x8f\x8d'])]), + Tree('V\xe2\x80\xa7\xe5\x9c\xb0', + [Tree('VA11', ['\xe4\xb8\x8d\xe5\x81\x9c']), + Tree('DE', ['\xe7\x9a\x84'])]), + Tree('VA4', ['\xe5\x93\xad\xe6\xb3\xa3'])]) + +Reading the CoNLL 2007 Dependency Treebanks: + + >>> from nltk.corpus import conll2007 + >>> conll2007.sents('esp.train')[0] # doctest: +SKIP + ['El', 'aumento', 'del', 'índice', 'de', 'desempleo', ...] + >>> conll2007.parsed_sents('esp.train')[0] # doctest: +SKIP + + >>> print(conll2007.parsed_sents('esp.train')[0].tree()) # doctest: +SKIP + (fortaleció + (aumento El (del (índice (de (desempleo estadounidense))))) + hoy + considerablemente + (al + (euro + (cotizaba + , + que + (a (15.35 las GMT)) + se + (en (mercado el (de divisas) (de Fráncfort))) + (a 0,9452_dólares) + (frente_a , (0,9349_dólares los (de (mañana esta))))))) + .) + +Word Lists and Lexicons +======================= + +The NLTK data package also includes a number of lexicons and word +lists. These are accessed just like text corpora. The following +examples illustrate the use of the wordlist corpora: + + >>> from nltk.corpus import names, stopwords, words + >>> words.fileids() + ['en', 'en-basic'] + >>> words.words('en') + ['A', 'a', 'aa', 'aal', 'aalii', 'aam', 'Aani', 'aardvark', 'aardwolf', ...] + + >>> stopwords.fileids() # doctest: +SKIP + ['arabic', 'azerbaijani', 'bengali', 'danish', 'dutch', 'english', 'finnish', 'french', ...] + >>> sorted(stopwords.words('portuguese')) + ['a', 'ao', 'aos', 'aquela', 'aquelas', 'aquele', 'aqueles', ...] + >>> names.fileids() + ['female.txt', 'male.txt'] + >>> names.words('male.txt') + ['Aamir', 'Aaron', 'Abbey', 'Abbie', 'Abbot', 'Abbott', ...] + >>> names.words('female.txt') + ['Abagael', 'Abagail', 'Abbe', 'Abbey', 'Abbi', 'Abbie', ...] + +The CMU Pronunciation Dictionary corpus contains pronunciation +transcriptions for over 100,000 words. 
It can be accessed as a list +of entries (where each entry consists of a word, an identifier, and a +transcription) or as a dictionary from words to lists of +transcriptions. Transcriptions are encoded as tuples of phoneme +strings. + + >>> from nltk.corpus import cmudict + >>> print(cmudict.entries()[653:659]) + [('acetate', ['AE1', 'S', 'AH0', 'T', 'EY2', 'T']), + ('acetic', ['AH0', 'S', 'EH1', 'T', 'IH0', 'K']), + ('acetic', ['AH0', 'S', 'IY1', 'T', 'IH0', 'K']), + ('aceto', ['AA0', 'S', 'EH1', 'T', 'OW0']), + ('acetochlor', ['AA0', 'S', 'EH1', 'T', 'OW0', 'K', 'L', 'AO2', 'R']), + ('acetone', ['AE1', 'S', 'AH0', 'T', 'OW2', 'N'])] + >>> # Load the entire cmudict corpus into a Python dictionary: + >>> transcr = cmudict.dict() + >>> print([transcr[w][0] for w in 'Natural Language Tool Kit'.lower().split()]) + [['N', 'AE1', 'CH', 'ER0', 'AH0', 'L'], + ['L', 'AE1', 'NG', 'G', 'W', 'AH0', 'JH'], + ['T', 'UW1', 'L'], + ['K', 'IH1', 'T']] + + +WordNet +======= + +Please see the separate WordNet howto. + +FrameNet +======== + +Please see the separate FrameNet howto. + +PropBank +======== + +Please see the separate PropBank howto. + +SentiWordNet +============ + +Please see the separate SentiWordNet howto. + +Categorized Corpora +=================== + +Several corpora included with NLTK contain documents that have been categorized for +topic, genre, polarity, etc. In addition to the standard corpus interface, these +corpora provide access to the list of categories and the mapping between the documents +and their categories (in both directions). 
Access the categories using the ``categories()`` +method, e.g.: + + >>> from nltk.corpus import brown, movie_reviews, reuters + >>> brown.categories() + ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', + 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction'] + >>> movie_reviews.categories() + ['neg', 'pos'] + >>> reuters.categories() + ['acq', 'alum', 'barley', 'bop', 'carcass', 'castor-oil', 'cocoa', + 'coconut', 'coconut-oil', 'coffee', 'copper', 'copra-cake', 'corn', + 'cotton', 'cotton-oil', 'cpi', 'cpu', 'crude', 'dfl', 'dlr', ...] + +This method has an optional argument that specifies a document or a list +of documents, allowing us to map from (one or more) documents to (one or more) categories: + + >>> brown.categories('ca01') + ['news'] + >>> brown.categories(['ca01','cb01']) + ['editorial', 'news'] + >>> reuters.categories('training/9865') + ['barley', 'corn', 'grain', 'wheat'] + >>> reuters.categories(['training/9865', 'training/9880']) + ['barley', 'corn', 'grain', 'money-fx', 'wheat'] + +We can go back the other way using the optional argument of the ``fileids()`` method: + + >>> reuters.fileids('barley') + ['test/15618', 'test/15649', 'test/15676', 'test/15728', 'test/15871', ...] + +Both the ``categories()`` and ``fileids()`` methods return a sorted list containing +no duplicates. + +In addition to mapping between categories and documents, these corpora permit +direct access to their contents via the categories. Instead of accessing a subset +of a corpus by specifying one or more fileids, we can identify one or more categories, e.g.: + + >>> brown.tagged_words(categories='news') + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] 
+ >>> brown.sents(categories=['editorial','reviews']) + [['Assembly', 'session', 'brought', 'much', 'good'], ['The', 'General', + 'Assembly', ',', 'which', 'adjourns', 'today', ',', 'has', 'performed', + 'in', 'an', 'atmosphere', 'of', 'crisis', 'and', 'struggle', 'from', + 'the', 'day', 'it', 'convened', '.'], ...] + +Note that it is an error to specify both documents and categories. + +In the context of a text categorization system, we can easily test if the +category assigned to a document is correct as follows: + + >>> def classify(doc): return 'news' # Trivial classifier + >>> doc = 'ca01' + >>> classify(doc) in brown.categories(doc) + True + + +Other Corpora +============= + +comparative_sentences +--------------------- +A list of sentences from various sources, especially reviews and articles. Each +line contains one sentence; sentences were separated by using a sentence tokenizer. +Comparative sentences have been annotated with their type, entities, features and +keywords. + + >>> from nltk.corpus import comparative_sentences + >>> comparison = comparative_sentences.comparisons()[0] + >>> comparison.text + ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly', + 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve", + 'had', '.'] + >>> comparison.entity_2 + 'models' + >>> (comparison.feature, comparison.keyword) + ('rewind', 'more') + >>> len(comparative_sentences.comparisons()) + 853 + +opinion_lexicon +--------------- +A list of positive and negative opinion words or sentiment words for English. 
+ + >>> from nltk.corpus import opinion_lexicon + >>> opinion_lexicon.words()[:4] + ['2-faced', '2-faces', 'abnormal', 'abolish'] + +The OpinionLexiconCorpusReader also provides shortcuts to retrieve positive/negative +words: + + >>> opinion_lexicon.negative()[:4] + ['2-faced', '2-faces', 'abnormal', 'abolish'] + +Note that words from `words()` method in opinion_lexicon are sorted by file id, +not alphabetically: + + >>> opinion_lexicon.words()[0:10] + ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort', 'aborted'] + >>> sorted(opinion_lexicon.words())[0:10] + ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort'] + +ppattach +-------- +The Prepositional Phrase Attachment corpus is a corpus of +prepositional phrase attachment decisions. Each instance in the +corpus is encoded as a ``PPAttachment`` object: + + >>> from nltk.corpus import ppattach + >>> ppattach.attachments('training') + [PPAttachment(sent='0', verb='join', noun1='board', + prep='as', noun2='director', attachment='V'), + PPAttachment(sent='1', verb='is', noun1='chairman', + prep='of', noun2='N.V.', attachment='N'), + ...] + >>> inst = ppattach.attachments('training')[0] + >>> (inst.sent, inst.verb, inst.noun1, inst.prep, inst.noun2) + ('0', 'join', 'board', 'as', 'director') + >>> inst.attachment + 'V' + +product_reviews_1 and product_reviews_2 +--------------------------------------- +These two datasets respectively contain annotated customer reviews of 5 and 9 +products from amazon.com. 
+ + >>> from nltk.corpus import product_reviews_1 + >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt') + >>> review = camera_reviews[0] + >>> review.sents()[0] + ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am', + 'extremely', 'satisfied', 'with', 'the', 'purchase', '.'] + >>> review.features() + [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'), + ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'), + ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'), + ('option', '+1')] + +It is also possible to reach the same information directly from the stream: + + >>> product_reviews_1.features('Canon_G3.txt') + [('canon powershot g3', '+3'), ('use', '+2'), ...] + +We can compute stats for specific product features: + + >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> mean = tot / n_reviews + >>> print(n_reviews, tot, mean) + 15 24 1.6 + +pros_cons +--------- +A list of pros/cons sentences for determining context (aspect) dependent +sentiment words, which are then applied to sentiment analysis of comparative +sentences. + + >>> from nltk.corpus import pros_cons + >>> pros_cons.sents(categories='Cons') + [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', + 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], + ...] + >>> pros_cons.words('IntegratedPros.txt') + ['Easy', 'to', 'use', ',', 'economical', '!', ...] + +semcor +------ +The Brown Corpus, annotated with WordNet senses. + + >>> from nltk.corpus import semcor + >>> semcor.words('brown2/tagfiles/br-n12.xml') + ['When', 'several', 'minutes', 'had', 'passed', ...] + +senseval +-------- +The Senseval 2 corpus is a word sense disambiguation corpus. 
Each +item in the corpus corresponds to a single ambiguous word. For each +of these words, the corpus contains a list of instances, corresponding +to occurrences of that word. Each instance provides the word; a list +of word senses that apply to the word occurrence; and the word's +context. + + >>> from nltk.corpus import senseval + >>> senseval.fileids() + ['hard.pos', 'interest.pos', 'line.pos', 'serve.pos'] + >>> senseval.instances('hard.pos') + ... + [SensevalInstance(word='hard-a', + position=20, + context=[('``', '``'), ('he', 'PRP'), ...('hard', 'JJ'), ...], + senses=('HARD1',)), + SensevalInstance(word='hard-a', + position=10, + context=[('clever', 'NNP'), ...('hard', 'JJ'), ('time', 'NN'), ...], + senses=('HARD1',)), ...] + +The following code looks at instances of the word 'interest', and +displays their local context (2 words on each side) and word sense(s): + + >>> for inst in senseval.instances('interest.pos')[:10]: + ... p = inst.position + ... left = ' '.join(w for (w,t) in inst.context[p-2:p]) + ... word = ' '.join(w for (w,t) in inst.context[p:p+1]) + ... right = ' '.join(w for (w,t) in inst.context[p+1:p+3]) + ... senses = ' '.join(inst.senses) + ... print('%20s |%10s | %-15s -> %s' % (left, word, right, senses)) + declines in | interest | rates . -> interest_6 + indicate declining | interest | rates because -> interest_6 + in short-term | interest | rates . -> interest_6 + 4 % | interest | in this -> interest_5 + company with | interests | in the -> interest_5 + , plus | interest | . -> interest_6 + set the | interest | rate on -> interest_6 + 's own | interest | , prompted -> interest_4 + principal and | interest | is the -> interest_6 + increase its | interest | to 70 -> interest_5 + +sentence_polarity +----------------- +The Sentence Polarity dataset contains 5331 positive and 5331 negative processed +sentences. 
+ + >>> from nltk.corpus import sentence_polarity + >>> sentence_polarity.sents() + [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish', + 'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find', + 'it', 'funny', '.'], ...] + >>> sentence_polarity.categories() + ['neg', 'pos'] + >>> sentence_polarity.sents()[1] + ["it's", 'so', 'laddish', 'and', 'juvenile', ',', 'only', 'teenage', 'boys', + 'could', 'possibly', 'find', 'it', 'funny', '.'] + +shakespeare +----------- +The Shakespeare corpus contains a set of Shakespeare plays, formatted +as XML files. These corpora are returned as ElementTree objects: + + >>> from nltk.corpus import shakespeare + >>> from xml.etree import ElementTree + >>> shakespeare.fileids() + ['a_and_c.xml', 'dream.xml', 'hamlet.xml', 'j_caesar.xml', ...] + >>> play = shakespeare.xml('dream.xml') + >>> print(play) + + >>> print('%s: %s' % (play[0].tag, play[0].text)) + TITLE: A Midsummer Night's Dream + >>> personae = [persona.text for persona in + ... play.findall('PERSONAE/PERSONA')] + >>> print(personae) + ['THESEUS, Duke of Athens.', 'EGEUS, father to Hermia.', ...] + >>> # Find and print speakers not listed as personae + >>> names = [persona.split(',')[0] for persona in personae] + >>> speakers = set(speaker.text for speaker in + ... play.findall('*/*/*/SPEAKER')) + >>> print(sorted(speakers.difference(names))) + ['ALL', 'COBWEB', 'DEMETRIUS', 'Fairy', 'HERNIA', 'LYSANDER', + 'Lion', 'MOTH', 'MUSTARDSEED', 'Moonshine', 'PEASEBLOSSOM', + 'Prologue', 'Pyramus', 'Thisbe', 'Wall'] + +subjectivity +------------ +The Subjectivity Dataset contains 5000 subjective and 5000 objective processed +sentences. 
+ + >>> from nltk.corpus import subjectivity + >>> subjectivity.categories() + ['obj', 'subj'] + >>> subjectivity.sents()[23] + ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits', + 'happened', 'off', 'screen', '.'] + >>> subjectivity.words(categories='subj') + ['smart', 'and', 'alert', ',', 'thirteen', ...] + +toolbox +------- +The Toolbox corpus distributed with NLTK contains a sample lexicon and +several sample texts from the Rotokas language. The Toolbox corpus +reader returns Toolbox files as XML ElementTree objects. The +following example loads the Rotokas dictionary, and figures out the +distribution of part-of-speech tags for reduplicated words. + +.. doctest: +SKIP + + >>> from nltk.corpus import toolbox + >>> from nltk.probability import FreqDist + >>> from xml.etree import ElementTree + >>> import re + >>> rotokas = toolbox.xml('rotokas.dic') + >>> redup_pos_freqdist = FreqDist() + >>> # Note: we skip over the first record, which is actually + >>> # the header. + >>> for record in rotokas[1:]: + ... lexeme = record.find('lx').text + ... if re.match(r'(.*)\1$', lexeme): + ... redup_pos_freqdist[record.find('ps').text] += 1 + >>> for item, count in redup_pos_freqdist.most_common(): + ... print(item, count) + V 41 + N 14 + ??? 4 + +This example displays some records from a Rotokas text: + +.. doctest: +SKIP + + >>> river = toolbox.xml('rotokas/river.txt', key='ref') + >>> for record in river.findall('record')[:3]: + ... for piece in record: + ... if len(piece.text) > 60: + ... print('%-6s %s...' % (piece.tag, piece.text[:57])) + ... else: + ... print('%-6s %s' % (piece.tag, piece.text)) + ref Paragraph 1 + t ``Viapau oisio ra ovaupasi ... + m viapau oisio ra ovau -pa -si ... + g NEG this way/like this and forget -PROG -2/3.DL... + p NEG ??? CONJ V.I -SUFF.V.3 -SUFF.V... + f ``No ken lus tingting wanema samting papa i bin tok,'' Na... + fe ``Don't forget what Dad said,'' yelled Naomi. + ref 2 + t Osa Ira ora Reviti viapau uvupasiva. 
+ m osa Ira ora Reviti viapau uvu -pa -si ... + g as/like name and name NEG hear/smell -PROG -2/3... + p CONJ N.PN CONJ N.PN NEG V.T -SUFF.V.3 -SUF... + f Tasol Ila na David no bin harim toktok. + fe But Ila and David took no notice. + ref 3 + t Ikaupaoro rokosiva ... + m ikau -pa -oro roko -si -va ... + g run/hurry -PROG -SIM go down -2/3.DL.M -RP ... + p V.T -SUFF.V.3 -SUFF.V.4 ADV -SUFF.V.4 -SUFF.VT.... + f Tupela i bin hariap i go long wara . + fe They raced to the river. + +timit +----- +The NLTK data package includes a fragment of the TIMIT +Acoustic-Phonetic Continuous Speech Corpus. This corpus is broken +down into small speech samples, each of which is available as a wave +file, a phonetic transcription, and a tokenized word list. + + >>> from nltk.corpus import timit + >>> print(timit.utteranceids()) + ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', + 'dr1-fvmh0/si2096', 'dr1-fvmh0/si836', 'dr1-fvmh0/sx116', + 'dr1-fvmh0/sx206', 'dr1-fvmh0/sx26', 'dr1-fvmh0/sx296', ...] + + >>> item = timit.utteranceids()[5] + >>> print(timit.phones(item)) + ['h#', 'k', 'l', 'ae', 's', 'pcl', 'p', 'dh', 'ax', + 's', 'kcl', 'k', 'r', 'ux', 'ix', 'nx', 'y', 'ax', + 'l', 'eh', 'f', 'tcl', 't', 'hh', 'ae', 'n', 'dcl', + 'd', 'h#'] + >>> print(timit.words(item)) + ['clasp', 'the', 'screw', 'in', 'your', 'left', 'hand'] + >>> timit.play(item) # doctest: +SKIP + +The corpus reader can combine the word segmentation information with +the phonemes to produce a single tree structure: + + >>> for tree in timit.phone_trees(item): + ... print(tree) + (S + h# + (clasp k l ae s pcl p) + (the dh ax) + (screw s kcl k r ux) + (in ix nx) + (your y ax) + (left l eh f tcl t) + (hand hh ae n dcl d) + h#) + +The start time and stop time of each phoneme, word, and sentence are +also available: + + >>> print(timit.phone_times(item)) + [('h#', 0, 2190), ('k', 2190, 3430), ('l', 3430, 4326), ...] + >>> print(timit.word_times(item)) + [('clasp', 2190, 8804), ('the', 8804, 9734), ...] 
+ >>> print(timit.sent_times(item))
+ [('Clasp the screw in your left hand.', 0, 32154)]
+
+We can use these times to play selected pieces of a speech sample:
+
+ >>> timit.play(item, 2190, 8804) # 'clasp' # doctest: +SKIP
+
+The corpus reader can also be queried for information about the
+speaker and sentence identifier for a given speech sample:
+
+ >>> print(timit.spkrid(item))
+ dr1-fvmh0
+ >>> print(timit.sentid(item))
+ sx116
+ >>> print(timit.spkrinfo(timit.spkrid(item)))
+ SpeakerInfo(id='VMH0',
+ sex='F',
+ dr='1',
+ use='TRN',
+ recdate='03/11/86',
+ birthdate='01/08/60',
+ ht='5\'05"',
+ race='WHT',
+ edu='BS',
+ comments='BEST NEW ENGLAND ACCENT SO FAR')
+
+ >>> # List the speech samples from the same speaker:
+ >>> timit.utteranceids(spkrid=timit.spkrid(item))
+ ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', ...]
+
+twitter_samples
+---------------
+
+Twitter is a well-known microblog service that allows public data to be
+collected via APIs. NLTK's twitter corpus currently contains a sample of 20k Tweets
+retrieved from the Twitter Streaming API.
+
+ >>> from nltk.corpus import twitter_samples
+ >>> twitter_samples.fileids()
+ ['negative_tweets.json', 'positive_tweets.json', 'tweets.20150430-223406.json']
+
+We follow standard practice in storing full Tweets as line-separated
+JSON. These data structures can be accessed via `tweets.docs()`. However, in general it
+is more practical to focus just on the text field of the Tweets, which
+are accessed via the `strings()` method.
+
+ >>> twitter_samples.strings('tweets.20150430-223406.json')[:5]
+ ['RT @KirkKus: Indirect cost of the UK being in the EU is estimated to be costing Britain \xa3170 billion per year! #BetterOffOut #UKIP', ...]
+
+The default tokenizer for Tweets is specialised for 'casual' text, and
+the `tokenized()` method returns a list of lists of tokens. 
+ + >>> twitter_samples.tokenized('tweets.20150430-223406.json')[:5] + [['RT', '@KirkKus', ':', 'Indirect', 'cost', 'of', 'the', 'UK', 'being', 'in', ...], + ['VIDEO', ':', 'Sturgeon', 'on', 'post-election', 'deals', 'http://t.co/BTJwrpbmOY'], ...] + +rte +--- +The RTE (Recognizing Textual Entailment) corpus was derived from the +RTE1, RTE2 and RTE3 datasets (dev and test data), and consists of a +list of XML-formatted 'text'/'hypothesis' pairs. + + >>> from nltk.corpus import rte + >>> print(rte.fileids()) + ['rte1_dev.xml', 'rte1_test.xml', 'rte2_dev.xml', ..., 'rte3_test.xml'] + >>> rtepairs = rte.pairs(['rte2_test.xml', 'rte3_test.xml']) + >>> print(rtepairs) + [, , , ...] + +In the gold standard test sets, each pair is labeled according to +whether or not the text 'entails' the hypothesis; the +entailment value is mapped to an integer 1 (True) or 0 (False). + + >>> rtepairs[5] + + >>> rtepairs[5].text + 'His wife Strida won a seat in parliament after forging an alliance + with the main anti-Syrian coalition in the recent election.' + >>> rtepairs[5].hyp + 'Strida elected to parliament.' + >>> rtepairs[5].value + 1 + +The RTE corpus also supports an ``xml()`` method which produces ElementTrees. + + >>> xmltree = rte.xml('rte3_dev.xml') + >>> xmltree # doctest: +SKIP + + >>> xmltree[7].findtext('t') + "Mrs. Bush's approval ratings have remained very high, above 80%, + even as her husband's have recently dropped below 50%." + +verbnet +------- +The VerbNet corpus is a lexicon that divides verbs into classes, based +on their syntax-semantics linking behavior. The basic elements in the +lexicon are verb lemmas, such as 'abandon' and 'accept', and verb +classes, which have identifiers such as 'remove-10.1' and +'admire-31.2-1'. These class identifiers consist of a representative +verb selected from the class, followed by a numerical identifier. 
The +list of verb lemmas, and the list of class identifiers, can be +retrieved with the following methods: + + >>> from nltk.corpus import verbnet + >>> verbnet.lemmas()[20:25] + ['accelerate', 'accept', 'acclaim', 'accompany', 'accrue'] + >>> verbnet.classids()[:5] + ['accompany-51.7', 'admire-31.2', 'admire-31.2-1', 'admit-65', 'adopt-93'] + +The `classids()` method may also be used to retrieve the classes that +a given lemma belongs to: + + >>> verbnet.classids('accept') + ['approve-77', 'characterize-29.2-1-1', 'obtain-13.5.2'] + +The `classids()` method may additionally be used to retrieve all classes +within verbnet if nothing is passed: + + >>> verbnet.classids() + ['accompany-51.7', 'admire-31.2', 'admire-31.2-1', 'admit-65', 'adopt-93', 'advise-37.9', 'advise-37.9-1', 'allow-64', 'amalgamate-22.2', 'amalgamate-22.2-1', 'amalgamate-22.2-1-1', 'amalgamate-22.2-2', 'amalgamate-22.2-2-1', 'amalgamate-22.2-3', 'amalgamate-22.2-3-1', 'amalgamate-22.2-3-1-1', 'amalgamate-22.2-3-2', 'amuse-31.1', 'animal_sounds-38', 'appeal-31.4', 'appeal-31.4-1', 'appeal-31.4-2', 'appeal-31.4-3', 'appear-48.1.1', 'appoint-29.1', 'approve-77', 'assessment-34', 'assuming_position-50', 'avoid-52', 'banish-10.2', 'battle-36.4', 'battle-36.4-1', 'begin-55.1', 'begin-55.1-1', 'being_dressed-41.3.3', 'bend-45.2', 'berry-13.7', 'bill-54.5', 'body_internal_motion-49', 'body_internal_states-40.6', 'braid-41.2.2', 'break-45.1', 'breathe-40.1.2', 'breathe-40.1.2-1', 'bring-11.3', 'bring-11.3-1', 'build-26.1', 'build-26.1-1', 'bulge-47.5.3', 'bump-18.4', 'bump-18.4-1', 'butter-9.9', 'calibratable_cos-45.6', 'calibratable_cos-45.6-1', 'calve-28', 'captain-29.8', 'captain-29.8-1', 'captain-29.8-1-1', 'care-88', 'care-88-1', 'carry-11.4', 'carry-11.4-1', 'carry-11.4-1-1', 'carve-21.2', 'carve-21.2-1', 'carve-21.2-2', 'change_bodily_state-40.8.4', 'characterize-29.2', 'characterize-29.2-1', 'characterize-29.2-1-1', 'characterize-29.2-1-2', 'chase-51.6', 'cheat-10.6', 'cheat-10.6-1', 
'cheat-10.6-1-1', 'chew-39.2', 'chew-39.2-1', 'chew-39.2-2', 'chit_chat-37.6', 'clear-10.3', 'clear-10.3-1', 'cling-22.5', 'coil-9.6', 'coil-9.6-1', 'coloring-24', 'complain-37.8', 'complete-55.2', 'concealment-16', 'concealment-16-1', 'confess-37.10', 'confine-92', 'confine-92-1', 'conjecture-29.5', 'conjecture-29.5-1', 'conjecture-29.5-2', 'consider-29.9', 'consider-29.9-1', 'consider-29.9-1-1', 'consider-29.9-1-1-1', 'consider-29.9-2', 'conspire-71', 'consume-66', 'consume-66-1', 'contiguous_location-47.8', 'contiguous_location-47.8-1', 'contiguous_location-47.8-2', 'continue-55.3', 'contribute-13.2', 'contribute-13.2-1', 'contribute-13.2-1-1', 'contribute-13.2-1-1-1', 'contribute-13.2-2', 'contribute-13.2-2-1', 'convert-26.6.2', 'convert-26.6.2-1', 'cooking-45.3', 'cooperate-73', 'cooperate-73-1', 'cooperate-73-2', 'cooperate-73-3', 'cope-83', 'cope-83-1', 'cope-83-1-1', 'correlate-86', 'correspond-36.1', 'correspond-36.1-1', 'correspond-36.1-1-1', 'cost-54.2', 'crane-40.3.2', 'create-26.4', 'create-26.4-1', 'curtsey-40.3.3', 'cut-21.1', 'cut-21.1-1', 'debone-10.8', 'declare-29.4', 'declare-29.4-1', 'declare-29.4-1-1', 'declare-29.4-1-1-1', 'declare-29.4-1-1-2', 'declare-29.4-1-1-3', 'declare-29.4-2', 'dedicate-79', 'defend-85', 'destroy-44', 'devour-39.4', 'devour-39.4-1', 'devour-39.4-2', 'differ-23.4', 'dine-39.5', 'disappearance-48.2', 'disassemble-23.3', 'discover-84', 'discover-84-1', 'discover-84-1-1', 'dress-41.1.1', 'dressing_well-41.3.2', 'drive-11.5', 'drive-11.5-1', 'dub-29.3', 'dub-29.3-1', 'eat-39.1', 'eat-39.1-1', 'eat-39.1-2', 'enforce-63', 'engender-27', 'entity_specific_cos-45.5', 'entity_specific_modes_being-47.2', 'equip-13.4.2', 'equip-13.4.2-1', 'equip-13.4.2-1-1', 'escape-51.1', 'escape-51.1-1', 'escape-51.1-2', 'escape-51.1-2-1', 'exceed-90', 'exchange-13.6', 'exchange-13.6-1', 'exchange-13.6-1-1', 'exhale-40.1.3', 'exhale-40.1.3-1', 'exhale-40.1.3-2', 'exist-47.1', 'exist-47.1-1', 'exist-47.1-1-1', 'feeding-39.7', 'ferret-35.6', 
'fill-9.8', 'fill-9.8-1', 'fit-54.3', 'flinch-40.5', 'floss-41.2.1', 'focus-87', 'forbid-67', 'force-59', 'force-59-1', 'free-80', 'free-80-1', 'fulfilling-13.4.1', 'fulfilling-13.4.1-1', 'fulfilling-13.4.1-2', 'funnel-9.3', 'funnel-9.3-1', 'funnel-9.3-2', 'funnel-9.3-2-1', 'future_having-13.3', 'get-13.5.1', 'get-13.5.1-1', 'give-13.1', 'give-13.1-1', 'gobble-39.3', 'gobble-39.3-1', 'gobble-39.3-2', 'gorge-39.6', 'groom-41.1.2', 'grow-26.2', 'help-72', 'help-72-1', 'herd-47.5.2', 'hiccup-40.1.1', 'hit-18.1', 'hit-18.1-1', 'hold-15.1', 'hold-15.1-1', 'hunt-35.1', 'hurt-40.8.3', 'hurt-40.8.3-1', 'hurt-40.8.3-1-1', 'hurt-40.8.3-2', 'illustrate-25.3', 'image_impression-25.1', 'indicate-78', 'indicate-78-1', 'indicate-78-1-1', 'inquire-37.1.2', 'instr_communication-37.4', 'investigate-35.4', 'judgement-33', 'keep-15.2', 'knead-26.5', 'learn-14', 'learn-14-1', 'learn-14-2', 'learn-14-2-1', 'leave-51.2', 'leave-51.2-1', 'lecture-37.11', 'lecture-37.11-1', 'lecture-37.11-1-1', 'lecture-37.11-2', 'light_emission-43.1', 'limit-76', 'linger-53.1', 'linger-53.1-1', 'lodge-46', 'long-32.2', 'long-32.2-1', 'long-32.2-2', 'manner_speaking-37.3', 'marry-36.2', 'marvel-31.3', 'marvel-31.3-1', 'marvel-31.3-2', 'marvel-31.3-3', 'marvel-31.3-4', 'marvel-31.3-5', 'marvel-31.3-6', 'marvel-31.3-7', 'marvel-31.3-8', 'marvel-31.3-9', 'masquerade-29.6', 'masquerade-29.6-1', 'masquerade-29.6-2', 'matter-91', 'meander-47.7', 'meet-36.3', 'meet-36.3-1', 'meet-36.3-2', 'mine-10.9', 'mix-22.1', 'mix-22.1-1', 'mix-22.1-1-1', 'mix-22.1-2', 'mix-22.1-2-1', 'modes_of_being_with_motion-47.3', 'murder-42.1', 'murder-42.1-1', 'neglect-75', 'neglect-75-1', 'neglect-75-1-1', 'neglect-75-2', 'nonvehicle-51.4.2', 'nonverbal_expression-40.2', 'obtain-13.5.2', 'obtain-13.5.2-1', 'occurrence-48.3', 'order-60', 'order-60-1', 'orphan-29.7', 'other_cos-45.4', 'pain-40.8.1', 'pay-68', 'peer-30.3', 'pelt-17.2', 'performance-26.7', 'performance-26.7-1', 'performance-26.7-1-1', 'performance-26.7-2', 
'performance-26.7-2-1', 'pit-10.7', 'pocket-9.10', 'pocket-9.10-1', 'poison-42.2', 'poke-19', 'pour-9.5', 'preparing-26.3', 'preparing-26.3-1', 'preparing-26.3-2', 'price-54.4', 'push-12', 'push-12-1', 'push-12-1-1', 'put-9.1', 'put-9.1-1', 'put-9.1-2', 'put_direction-9.4', 'put_spatial-9.2', 'put_spatial-9.2-1', 'reach-51.8', 'reflexive_appearance-48.1.2', 'refrain-69', 'register-54.1', 'rely-70', 'remove-10.1', 'risk-94', 'risk-94-1', 'roll-51.3.1', 'rummage-35.5', 'run-51.3.2', 'rush-53.2', 'say-37.7', 'say-37.7-1', 'say-37.7-1-1', 'say-37.7-2', 'scribble-25.2', 'search-35.2', 'see-30.1', 'see-30.1-1', 'see-30.1-1-1', 'send-11.1', 'send-11.1-1', 'separate-23.1', 'separate-23.1-1', 'separate-23.1-2', 'settle-89', 'shake-22.3', 'shake-22.3-1', 'shake-22.3-1-1', 'shake-22.3-2', 'shake-22.3-2-1', 'sight-30.2', 'simple_dressing-41.3.1', 'slide-11.2', 'slide-11.2-1-1', 'smell_emission-43.3', 'snooze-40.4', 'sound_emission-43.2', 'sound_existence-47.4', 'spank-18.3', 'spatial_configuration-47.6', 'split-23.2', 'spray-9.7', 'spray-9.7-1', 'spray-9.7-1-1', 'spray-9.7-2', 'stalk-35.3', 'steal-10.5', 'stimulus_subject-30.4', 'stop-55.4', 'stop-55.4-1', 'substance_emission-43.4', 'succeed-74', 'succeed-74-1', 'succeed-74-1-1', 'succeed-74-2', 'suffocate-40.7', 'suspect-81', 'swarm-47.5.1', 'swarm-47.5.1-1', 'swarm-47.5.1-2', 'swarm-47.5.1-2-1', 'swat-18.2', 'talk-37.5', 'tape-22.4', 'tape-22.4-1', 'tell-37.2', 'throw-17.1', 'throw-17.1-1', 'throw-17.1-1-1', 'tingle-40.8.2', 'touch-20', 'touch-20-1', 'transcribe-25.4', 'transfer_mesg-37.1.1', 'transfer_mesg-37.1.1-1', 'transfer_mesg-37.1.1-1-1', 'try-61', 'turn-26.6.1', 'turn-26.6.1-1', 'urge-58', 'vehicle-51.4.1', 'vehicle-51.4.1-1', 'waltz-51.5', 'want-32.1', 'want-32.1-1', 'want-32.1-1-1', 'weather-57', 'weekend-56', 'wink-40.3.1', 'wink-40.3.1-1', 'wipe_instr-10.4.2', 'wipe_instr-10.4.2-1', 'wipe_manner-10.4.1', 'wipe_manner-10.4.1-1', 'wish-62', 'withdraw-82', 'withdraw-82-1', 'withdraw-82-2', 'withdraw-82-3'] + +The 
primary object in the lexicon is a class record, which is stored
+as an ElementTree xml object. The class record for a given class
+identifier is returned by the `vnclass()` method:
+
+ >>> verbnet.vnclass('remove-10.1')
+ <Element 'VNCLASS' at 0x...>
+
+The `vnclass()` method also accepts "short" identifiers, such as '10.1':
+
+ >>> verbnet.vnclass('10.1')
+ <Element 'VNCLASS' at 0x...>
+
+See the Verbnet documentation, or the Verbnet files, for information
+about the structure of this xml. As an example, we can retrieve a
+list of thematic roles for a given Verbnet class:
+
+ >>> vn_31_2 = verbnet.vnclass('admire-31.2')
+ >>> for themrole in vn_31_2.findall('THEMROLES/THEMROLE'):
+ ... print(themrole.attrib['type'], end=' ')
+ ... for selrestr in themrole.findall('SELRESTRS/SELRESTR'):
+ ... print('[%(Value)s%(type)s]' % selrestr.attrib, end=' ')
+ ... print()
+ Theme
+ Experiencer [+animate]
+ Predicate
+
+The Verbnet corpus also provides a variety of pretty printing
+functions that can be used to display the xml contents in a more
+concise form. The simplest such method is `pprint()`:
+
+ >>> print(verbnet.pprint('57'))
+ weather-57
+ Subclasses: (none)
+ Members: blow clear drizzle fog freeze gust hail howl lightning mist
+ mizzle pelt pour precipitate rain roar shower sleet snow spit spot
+ sprinkle storm swelter teem thaw thunder
+ Thematic roles:
+ * Theme[+concrete +force]
+ Frames:
+ Intransitive (Expletive Subject)
+ Example: It's raining.
+ Syntax: LEX[it] LEX[[+be]] VERB
+ Semantics:
+ * weather(during(E), Weather_type, ?Theme)
+ NP (Expletive Subject, Theme Object)
+ Example: It's raining cats and dogs.
+ Syntax: LEX[it] LEX[[+be]] VERB NP[Theme]
+ Semantics:
+ * weather(during(E), Weather_type, Theme)
+ PP (Expletive Subject, Theme-PP)
+ Example: It was pelting with rain.
+ Syntax: LEX[it[+be]] VERB PREP[with] NP[Theme]
+ Semantics:
+ * weather(during(E), Weather_type, Theme)
+
+Verbnet gives us frames that link the syntax and semantics using an example. 
+These frames are part of the corpus and we can use `frames()` to get a frame +for a given verbnet class. + + >>> frame = verbnet.frames('57') + >>> frame == [{'example': "It's raining.", 'description': {'primary': 'Intransitive', 'secondary': 'Expletive Subject'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'LEX', 'modifiers': {'value': '[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': '?Theme'}], 'negated': False}]}, {'example': "It's raining cats and dogs.", 'description': {'primary': 'NP', 'secondary': 'Expletive Subject, Theme Object'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'LEX', 'modifiers': {'value': '[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'NP', 'modifiers': {'value': 'Theme', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': 'Theme'}], 'negated': False}]}, {'example': 'It was pelting with rain.', 'description': {'primary': 'PP', 'secondary': 'Expletive Subject, Theme-PP'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'PREP', 'modifiers': {'value': 'with', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'NP', 'modifiers': {'value': 'Theme', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 
'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': 'Theme'}], 'negated': False}]}] + True + +Verbnet corpus lets us access thematic roles individually using `themroles()`. + + >>> themroles = verbnet.themroles('57') + >>> themroles == [{'modifiers': [{'type': 'concrete', 'value': '+'}, {'type': 'force', 'value': '+'}], 'type': 'Theme'}] + True + +Verbnet classes may also have subclasses sharing similar syntactic and semantic properties +while having differences with the superclass. The Verbnet corpus allows us to access these +subclasses using `subclasses()`. + + >>> print(verbnet.subclasses('9.1')) #Testing for 9.1 since '57' does not have subclasses + ['put-9.1-1', 'put-9.1-2'] + + +nps_chat +-------- + +The NPS Chat Corpus, Release 1.0 consists of over 10,000 posts in age-specific +chat rooms, which have been anonymized, POS-tagged and dialogue-act tagged. + + >>> print(nltk.corpus.nps_chat.words()) + ['now', 'im', 'left', 'with', 'this', 'gay', ...] + >>> print(nltk.corpus.nps_chat.tagged_words()) + [('now', 'RB'), ('im', 'PRP'), ('left', 'VBD'), ...] + >>> print(nltk.corpus.nps_chat.tagged_posts()) + [[('now', 'RB'), ('im', 'PRP'), ('left', 'VBD'), ('with', 'IN'), + ('this', 'DT'), ('gay', 'JJ'), ('name', 'NN')], [(':P', 'UH')], ...] + +We can access the XML elements corresponding to individual posts. These elements +have ``class`` and ``user`` attributes that we can access using ``p.attrib['class']`` +and ``p.attrib['user']``. They also have text content, accessed using ``p.text``. + + >>> print(nltk.corpus.nps_chat.xml_posts()) + [, , ...] 
+ >>> posts = nltk.corpus.nps_chat.xml_posts() + >>> sorted(nltk.FreqDist(p.attrib['class'] for p in posts).keys()) + ['Accept', 'Bye', 'Clarify', 'Continuer', 'Emotion', 'Emphasis', + 'Greet', 'Other', 'Reject', 'Statement', 'System', 'nAnswer', + 'whQuestion', 'yAnswer', 'ynQuestion'] + >>> posts[0].text + 'now im left with this gay name' + +In addition to the above methods for accessing tagged text, we can navigate +the XML structure directly, as follows: + + >>> tokens = posts[0].findall('terminals/t') + >>> [t.attrib['pos'] + "/" + t.attrib['word'] for t in tokens] + ['RB/now', 'PRP/im', 'VBD/left', 'IN/with', 'DT/this', 'JJ/gay', 'NN/name'] + +multext_east +------------ + +The Multext-East Corpus consists of POS-tagged versions of George Orwell's book +1984 in 12 languages: English, Czech, Hungarian, Macedonian, Slovenian, Serbian, +Slovak, Romanian, Estonian, Farsi, Bulgarian and Polish. +The corpus can be accessed using the usual methods for tagged corpora. The tagset +can be transformed from the Multext-East specific MSD tags to the Universal tagset +using the "tagset" parameter of all functions returning tagged parts of the corpus. + + >>> print(nltk.corpus.multext_east.words("oana-en.xml")) + ['It', 'was', 'a', 'bright', ...] + >>> print(nltk.corpus.multext_east.tagged_words("oana-en.xml")) + [('It', '#Pp3ns'), ('was', '#Vmis3s'), ('a', '#Di'), ...] + >>> print(nltk.corpus.multext_east.tagged_sents("oana-en.xml", "universal")) + [[('It', 'PRON'), ('was', 'VERB'), ('a', 'DET'), ...] + + + +--------------------- +Corpus Reader Classes +--------------------- + +NLTK's *corpus reader* classes are used to access the contents of a +diverse set of corpora. Each corpus reader class is specialized to +handle a specific corpus format. 
Examples include the
+`PlaintextCorpusReader`, which handles corpora that consist of a set
+of unannotated text files, and the `BracketParseCorpusReader`, which
+handles corpora that consist of files containing
+parenthesis-delineated parse trees.
+
+Automatically Created Corpus Reader Instances
+=============================================
+
+When the `nltk.corpus` module is imported, it automatically creates a
+set of corpus reader instances that can be used to access the corpora
+in the NLTK data distribution. Here is a small sample of those
+corpus reader instances:
+
+ >>> import nltk
+ >>> nltk.corpus.brown
+ <CategorizedTaggedCorpusReader in '.../corpora/brown' ...>
+ >>> nltk.corpus.treebank
+ <BracketParseCorpusReader in '.../corpora/treebank/combined' ...>
+ >>> nltk.corpus.names
+ <WordListCorpusReader in '.../corpora/names' ...>
+ >>> nltk.corpus.genesis
+ <PlaintextCorpusReader in '.../corpora/genesis' ...>
+ >>> nltk.corpus.inaugural
+ <PlaintextCorpusReader in '.../corpora/inaugural' ...>
+
+This sample illustrates that different corpus reader classes are used
+to read different corpora; but that the same corpus reader class may
+be used for more than one corpus (e.g., ``genesis`` and ``inaugural``).
+
+Creating New Corpus Reader Instances
+====================================
+
+Although the `nltk.corpus` module automatically creates corpus reader
+instances for the corpora in the NLTK data distribution, you may
+sometimes need to create your own corpus reader. In particular, you
+would need to create your own corpus reader if you want...
+
+- To access a corpus that is not included in the NLTK data
+ distribution.
+
+- To access a full copy of a corpus for which the NLTK data
+ distribution only provides a sample.
+
+- To access a corpus using a customized corpus reader (e.g., with
+ a customized tokenizer).
+
+To create a new corpus reader, you will first need to look up the
+signature for that corpus reader's constructor. Different corpus
+readers have different constructor signatures, but most of the
+constructor signatures have the basic form::
+
+ SomeCorpusReader(root, files, ...options...) 
+ +Where ``root`` is an absolute path to the directory containing the +corpus data files; ``files`` is either a list of file names (relative +to ``root``) or a regexp specifying which files should be included; +and ``options`` are additional reader-specific options. For example, +we can create a customized corpus reader for the genesis corpus that +uses a different sentence tokenizer as follows: + + >>> # Find the directory where the corpus lives. + >>> genesis_dir = nltk.data.find('corpora/genesis') + >>> # Create our custom sentence tokenizer. + >>> my_sent_tokenizer = nltk.RegexpTokenizer('[^.!?]+') + >>> # Create the new corpus reader object. + >>> my_genesis = nltk.corpus.PlaintextCorpusReader( + ... genesis_dir, r'.*\.txt', sent_tokenizer=my_sent_tokenizer) + >>> # Use the new corpus reader object. + >>> print(my_genesis.sents('english-kjv.txt')[0]) + ['In', 'the', 'beginning', 'God', 'created', 'the', 'heaven', + 'and', 'the', 'earth'] + +If you wish to read your own plaintext corpus, which is stored in the +directory '/usr/share/some-corpus', then you can create a corpus +reader for it with:: + + >>> my_corpus = nltk.corpus.PlaintextCorpusReader( + ... '/usr/share/some-corpus', r'.*\.txt') # doctest: +SKIP + +For a complete list of corpus reader subclasses, see the API +documentation for `nltk.corpus.reader`. + +Corpus Types +============ + +Corpora vary widely in the types of content they include. This is +reflected in the fact that the base class `CorpusReader` only defines +a few general-purpose methods for listing and accessing the files that +make up a corpus. It is up to the subclasses to define *data access +methods* that provide access to the information in the corpus. +However, corpus reader subclasses should be consistent in their +definitions of these data access methods wherever possible. 
+ +At a high level, corpora can be divided into three basic types: + +- A *token corpus* contains information about specific occurrences of + language use (or linguistic tokens), such as dialogues or written + texts. Examples of token corpora are collections of written text + and collections of speech. + +- A *type corpus*, or *lexicon*, contains information about a coherent + set of lexical items (or linguistic types). Examples of lexicons + are dictionaries and word lists. + +- A *language description corpus* contains information about a set of + non-lexical linguistic constructs, such as grammar rules. + +However, many individual corpora blur the distinctions between these +types. For example, corpora that are primarily lexicons may include +token data in the form of example sentences; and corpora that are +primarily token corpora may be accompanied by one or more word lists +or other lexical data sets. + +Because corpora vary so widely in their information content, we have +decided that it would not be wise to use separate corpus reader base +classes for different corpus types. Instead, we simply try to make +the corpus readers consistent wherever possible, but let them differ +where the underlying data itself differs. + +Common Corpus Reader Methods +============================ + +As mentioned above, there are only a handful of methods that all +corpus readers are guaranteed to implement. These methods provide +access to the files that contain the corpus data. Every corpus is +assumed to consist of one or more files, all located in a common root +directory (or in subdirectories of that root directory). The absolute +path to the root directory is stored in the ``root`` property: + + >>> import os + >>> str(nltk.corpus.genesis.root).replace(os.path.sep,'/') + '.../nltk_data/corpora/genesis' + +Each file within the corpus is identified by a platform-independent +identifier, which is basically a path string that uses ``/`` as the +path separator. 
I.e., this identifier can be converted to a relative +path as follows: + + >>> some_corpus_file_id = nltk.corpus.reuters.fileids()[0] + >>> import os.path + >>> os.path.normpath(some_corpus_file_id).replace(os.path.sep,'/') + 'test/14826' + +To get a list of all data files that make up a corpus, use the +``fileids()`` method. In some corpora, these files will not all contain +the same type of data; for example, for the ``nltk.corpus.timit`` +corpus, ``fileids()`` will return a list including text files, word +segmentation files, phonetic transcription files, sound files, and +metadata files. For corpora with diverse file types, the ``fileids()`` +method will often take one or more optional arguments, which can be +used to get a list of the files with a specific file type: + + >>> nltk.corpus.timit.fileids() + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa1.txt', 'dr1-fvmh0/sa1.wav', ...] + >>> nltk.corpus.timit.fileids('phn') + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa2.phn', 'dr1-fvmh0/si1466.phn', ...] + +In some corpora, the files are divided into distinct categories. For +these corpora, the ``fileids()`` method takes an optional argument, +which can be used to get a list of the files within a specific category: + + >>> nltk.corpus.brown.fileids('hobbies') + ['ce01', 'ce02', 'ce03', 'ce04', 'ce05', 'ce06', 'ce07', ...] + +The ``abspath()`` method can be used to find the absolute path to a +corpus file, given its file identifier: + + >>> str(nltk.corpus.brown.abspath('ce06')).replace(os.path.sep,'/') + '.../corpora/brown/ce06' + +The ``abspaths()`` method can be used to find the absolute paths for +one corpus file, a list of corpus files, or (if no fileids are specified), +all corpus files. 
+
+This method is mainly useful as a helper method when defining corpus
+data access methods, since data access methods can usually be called
+with a string argument (to get a view for a specific file), with a
+list argument (to get a view for a specific list of files), or with no
+argument (to get a view for the whole corpus).
+
+Data Access Methods
+===================
+
+Individual corpus reader subclasses typically extend this basic set of
+file-access methods with one or more *data access methods*, which provide
+easy access to the data contained in the corpus. The signatures for
+data access methods often have the basic form::
+
+ corpus_reader.some_data_access(fileids=None, ...options...)
+
+Where ``fileids`` can be a single file identifier string (to get a view
+for a specific file); a list of file identifier strings (to get a view
+for a specific list of files); or None (to get a view for the entire
+corpus). Some of the common data access methods, and their return
+types, are:
+
+ - I{corpus}.words(): list of str
+ - I{corpus}.sents(): list of (list of str)
+ - I{corpus}.paras(): list of (list of (list of str))
+ - I{corpus}.tagged_words(): list of (str,str) tuple
+ - I{corpus}.tagged_sents(): list of (list of (str,str))
+ - I{corpus}.tagged_paras(): list of (list of (list of (str,str)))
+ - I{corpus}.chunked_sents(): list of (Tree w/ (str,str) leaves)
+ - I{corpus}.parsed_sents(): list of (Tree with str leaves)
+ - I{corpus}.parsed_paras(): list of (list of (Tree with str leaves))
+ - I{corpus}.xml(): A single xml ElementTree
+ - I{corpus}.raw(): str (unprocessed corpus contents)
+
+For example, the `words()` method is supported by many different
+corpora, and returns a flat list of word strings:
+
+ >>> nltk.corpus.brown.words()
+ ['The', 'Fulton', 'County', 'Grand', 'Jury', ...]
+ >>> nltk.corpus.treebank.words()
+ ['Pierre', 'Vinken', ',', '61', 'years', 'old', ...]
+ >>> nltk.corpus.conll2002.words() + ['Sao', 'Paulo', '(', 'Brasil', ')', ',', '23', ...] + >>> nltk.corpus.genesis.words() + ['In', 'the', 'beginning', 'God', 'created', ...] + +On the other hand, the `tagged_words()` method is only supported by +corpora that include part-of-speech annotations: + + >>> nltk.corpus.brown.tagged_words() + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> nltk.corpus.treebank.tagged_words() + [('Pierre', 'NNP'), ('Vinken', 'NNP'), ...] + >>> nltk.corpus.conll2002.tagged_words() + [('Sao', 'NC'), ('Paulo', 'VMI'), ('(', 'Fpa'), ...] + >>> nltk.corpus.genesis.tagged_words() + Traceback (most recent call last): + ... + AttributeError: 'PlaintextCorpusReader' object has no attribute 'tagged_words' + +Although most corpus readers use file identifiers to index their +content, some corpora use different identifiers instead. For example, +the data access methods for the ``timit`` corpus uses *utterance +identifiers* to select which corpus items should be returned: + + >>> nltk.corpus.timit.utteranceids() + ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', ...] + >>> nltk.corpus.timit.words('dr1-fvmh0/sa2') + ["don't", 'ask', 'me', 'to', 'carry', 'an', 'oily', 'rag', 'like', 'that'] + +Attempting to call ``timit``\ 's data access methods with a file +identifier will result in an exception: + + >>> nltk.corpus.timit.fileids() + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa1.txt', 'dr1-fvmh0/sa1.wav', ...] + >>> nltk.corpus.timit.words('dr1-fvmh0/sa1.txt') # doctest: +SKIP + Traceback (most recent call last): + ... + IOError: No such file or directory: '.../dr1-fvmh0/sa1.txt.wrd' + +As another example, the ``propbank`` corpus defines the ``roleset()`` +method, which expects a roleset identifier, not a file identifier: + + >>> roleset = nltk.corpus.propbank.roleset('eat.01') + >>> from xml.etree import ElementTree as ET + >>> print(ET.tostring(roleset).decode('utf8')) + + + ...... + ... + ... 
+ +Stream Backed Corpus Views +========================== +An important feature of NLTK's corpus readers is that many of them +access the underlying data files using "corpus views." A *corpus +view* is an object that acts like a simple data structure (such as a +list), but does not store the data elements in memory; instead, data +elements are read from the underlying data files on an as-needed +basis. + +By only loading items from the file on an as-needed basis, corpus +views maintain both memory efficiency and responsiveness. The memory +efficiency of corpus readers is important because some corpora contain +very large amounts of data, and storing the entire data set in memory +could overwhelm many machines. The responsiveness is important when +experimenting with corpora in interactive sessions and in in-class +demonstrations. + +The most common corpus view is the `StreamBackedCorpusView`, which +acts as a read-only list of tokens. Two additional corpus view +classes, `ConcatenatedCorpusView` and `LazySubsequence`, make it +possible to create concatenations and take slices of +`StreamBackedCorpusView` objects without actually storing the +resulting list-like object's elements in memory. + +In the future, we may add additional corpus views that act like other +basic data structures, such as dictionaries. + +Writing New Corpus Readers +========================== + +In order to add support for new corpus formats, it is necessary to +define new corpus reader classes. For many corpus formats, writing +new corpus readers is relatively straight-forward. In this section, +we'll describe what's involved in creating a new corpus reader. If +you do create a new corpus reader, we encourage you to contribute it +back to the NLTK project. + +Don't Reinvent the Wheel +------------------------ +Before you start writing a new corpus reader, you should check to be +sure that the desired format can't be read using an existing corpus +reader with appropriate constructor arguments. 
For example, although +the `TaggedCorpusReader` assumes that words and tags are separated by +``/`` characters by default, an alternative tag-separation character +can be specified via the ``sep`` constructor argument. You should +also check whether the new corpus format can be handled by subclassing +an existing corpus reader, and tweaking a few methods or variables. + +Design +------ +If you decide to write a new corpus reader from scratch, then you +should first decide which data access methods you want the reader to +provide, and what their signatures should be. You should look at +existing corpus readers that process corpora with similar data +contents, and try to be consistent with those corpus readers whenever +possible. + +You should also consider what sets of identifiers are appropriate for +the corpus format. Where it's practical, file identifiers should be +used. However, for some corpora, it may make sense to use additional +sets of identifiers. Each set of identifiers should have a distinct +name (e.g., fileids, utteranceids, rolesets); and you should be consistent +in using that name to refer to that identifier. Do not use parameter +names like ``id``, which leave it unclear what type of identifier is +required. + +Once you've decided what data access methods and identifiers are +appropriate for your corpus, you should decide if there are any +customizable parameters that you'd like the corpus reader to handle. +These parameters make it possible to use a single corpus reader to +handle a wider variety of corpora. The ``sep`` argument for +`TaggedCorpusReader`, mentioned above, is an example of a customizable +corpus reader parameter. + +Implementation +-------------- + +Constructor +~~~~~~~~~~~ +If your corpus reader implements any customizable parameters, then +you'll need to override the constructor. Typically, the new +constructor will first call its base class's constructor, and then +store the customizable parameters. 
For example, the +`ConllChunkCorpusReader`\ 's constructor is defined as follows: + + >>> def __init__(self, root, fileids, chunk_types, encoding='utf8', + ... tagset=None, separator=None): + ... ConllCorpusReader.__init__( + ... self, root, fileids, ('words', 'pos', 'chunk'), + ... chunk_types=chunk_types, encoding=encoding, + ... tagset=tagset, separator=separator) + +If your corpus reader does not implement any customization parameters, +then you can often just inherit the base class's constructor. + +Data Access Methods +~~~~~~~~~~~~~~~~~~~ + +The most common type of data access method takes an argument +identifying which files to access, and returns a view covering those +files. This argument may be a single file identifier string (to get a +view for a specific file); a list of file identifier strings (to get a +view for a specific list of files); or None (to get a view for the +entire corpus). The method's implementation converts this argument to +a list of path names using the `abspaths()` method, which handles all +three value types (string, list, and None): + + >>> print(str(nltk.corpus.brown.abspaths()).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ca01'), + FileSystemPathPointer('.../corpora/brown/ca02'), ...] + >>> print(str(nltk.corpus.brown.abspaths('ce06')).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ce06')] + >>> print(str(nltk.corpus.brown.abspaths(['ce06', 'ce07'])).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ce06'), + FileSystemPathPointer('.../corpora/brown/ce07')] + +An example of this type of method is the `words()` method, defined by +the `PlaintextCorpusReader` as follows: + + >>> def words(self, fileids=None): + ... return concat([self.CorpusView(fileid, self._read_word_block) + ... for fileid in self.abspaths(fileids)]) + +This method first uses `abspaths()` to convert ``fileids`` to a list of +absolute paths. 
It then creates a corpus view for each file, using
+the `PlaintextCorpusReader._read_word_block()` method to read elements
+from the data file (see the discussion of corpus views below).
+Finally, it combines these corpus views using the
+`nltk.corpus.reader.util.concat()` function.
+
+When writing a corpus reader for a corpus that is never expected to be
+very large, it can sometimes be appropriate to read the files
+directly, rather than using a corpus view. For example, the
+`WordListCorpusReader` class defines its `words()` method as follows:
+
+ >>> def words(self, fileids=None):
+ ... return concat([[w for w in open(fileid).read().split('\n') if w]
+ ... for fileid in self.abspaths(fileids)])
+
+(This is usually more appropriate for lexicons than for token corpora.)
+
+If the type of data returned by a data access method is one for which
+NLTK has a conventional representation (e.g., words, tagged words, and
+parse trees), then you should use that representation. Otherwise, you
+may find it necessary to define your own representation. For data
+structures that are relatively corpus-specific, it's usually best to
+define new classes for these elements. For example, the ``propbank``
+corpus defines the `PropbankInstance` class to store the semantic role
+labeling instances described by the corpus; and the ``ppattach``
+corpus defines the `PPAttachment` class to store the prepositional
+attachment instances described by the corpus.
+
+Corpus Views
+~~~~~~~~~~~~
+.. (Much of the content for this section is taken from the
+ StreamBackedCorpusView docstring.)
+
+The heart of a `StreamBackedCorpusView` is its *block reader*
+function, which reads zero or more tokens from a stream, and returns
+them as a list. A very simple example of a block reader is:
+
+ >>> def simple_block_reader(stream):
+ ...
return stream.readline().split() + +This simple block reader reads a single line at a time, and returns a +single token (consisting of a string) for each whitespace-separated +substring on the line. A `StreamBackedCorpusView` built from this +block reader will act like a read-only list of all the +whitespace-separated tokens in an underlying file. + +When deciding how to define the block reader for a given corpus, +careful consideration should be given to the size of blocks handled by +the block reader. Smaller block sizes will increase the memory +requirements of the corpus view's internal data structures (by 2 +integers per block). On the other hand, larger block sizes may +decrease performance for random access to the corpus. (But note that +larger block sizes will *not* decrease performance for iteration.) + +Internally, the `StreamBackedCorpusView` class maintains a partial +mapping from token index to file position, with one entry per block. +When a token with a given index *i* is requested, the corpus view +constructs it as follows: + +1. First, it searches the toknum/filepos mapping for the token index + closest to (but less than or equal to) *i*. + +2. Then, starting at the file position corresponding to that index, it + reads one block at a time using the block reader until it reaches + the requested token. + +The toknum/filepos mapping is created lazily: it is initially empty, +but every time a new block is read, the block's initial token is added +to the mapping. (Thus, the toknum/filepos map has one entry per +block.) + +You can create your own corpus view in one of two ways: + +1. Call the `StreamBackedCorpusView` constructor, and provide your + block reader function via the ``block_reader`` argument. + +2. Subclass `StreamBackedCorpusView`, and override the + `read_block()` method. 
+
+The first option is usually easier, but the second option can allow
+you to write a single `read_block` method whose behavior can be
+customized by different parameters to the subclass's constructor. For
+an example of this design pattern, see the `TaggedCorpusView` class,
+which is used by `TaggedCorpusReader`.
+
+----------------
+Regression Tests
+----------------
+
+The following helper functions are used to create and then delete
+testing corpora that are stored in temporary directories. These
+testing corpora are used to make sure the readers work correctly.
+
+ >>> import tempfile, os.path, textwrap
+ >>> def make_testcorpus(ext='', **fileids):
+ ... root = tempfile.mkdtemp()
+ ... for fileid, contents in fileids.items():
+ ... fileid += ext
+ ... f = open(os.path.join(root, fileid), 'w')
+ ... f.write(textwrap.dedent(contents))
+ ... f.close()
+ ... return root
+ >>> def del_testcorpus(root):
+ ... for fileid in os.listdir(root):
+ ... os.remove(os.path.join(root, fileid))
+ ... os.rmdir(root)
+
+Plaintext Corpus Reader
+=======================
+The plaintext corpus reader is used to access corpora that consist of
+unprocessed plaintext data. It assumes that paragraph breaks are
+indicated by blank lines. Sentences and words can be tokenized using
+the default tokenizers, or by custom tokenizers specified as
+parameters to the constructor.
+
+ >>> root = make_testcorpus(ext='.txt',
+ ... a="""\
+ ... This is the first sentence. Here is another
+ ... sentence! And here's a third sentence.
+ ...
+ ... This is the second paragraph. Tokenization is currently
+ ... fairly simple, so the period in Mr. gets tokenized.
+ ... """,
+ ... b="""This is the second file.""")
+
+ >>> from nltk.corpus.reader.plaintext import PlaintextCorpusReader
+
+The list of documents can be specified explicitly, or implicitly (using a
+regexp). The ``ext`` argument specifies a file extension.
+ + >>> corpus = PlaintextCorpusReader(root, ['a.txt', 'b.txt']) + >>> corpus.fileids() + ['a.txt', 'b.txt'] + >>> corpus = PlaintextCorpusReader(root, r'.*\.txt') + >>> corpus.fileids() + ['a.txt', 'b.txt'] + +The directory containing the corpus is corpus.root: + + >>> str(corpus.root) == str(root) + True + +We can get a list of words, or the raw string: + + >>> corpus.words() + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> corpus.raw()[:40] + 'This is the first sentence. Here is ano' + +Check that reading individual documents works, and reading all documents at +once works: + + >>> len(corpus.words()), [len(corpus.words(d)) for d in corpus.fileids()] + (46, [40, 6]) + >>> corpus.words('a.txt') + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> corpus.words('b.txt') + ['This', 'is', 'the', 'second', 'file', '.'] + >>> corpus.words()[:4], corpus.words()[-4:] + (['This', 'is', 'the', 'first'], ['the', 'second', 'file', '.']) + +We're done with the test corpus: + + >>> del_testcorpus(root) + +Test the plaintext corpora that come with nltk: + + >>> from nltk.corpus import abc, genesis, inaugural + >>> from nltk.corpus import state_union, webtext + >>> for corpus in (abc, genesis, inaugural, state_union, + ... webtext): + ... print(str(corpus).replace('\\\\','/')) + ... print(' ', repr(corpus.fileids())[:60]) + ... print(' ', repr(corpus.words()[:10])[:60]) + + ['rural.txt', 'science.txt'] + ['PM', 'denies', 'knowledge', 'of', 'AWB', ... + + ['english-kjv.txt', 'english-web.txt', 'finnish.txt', ... + ['In', 'the', 'beginning', 'God', 'created', 'the', ... + + ['1789-Washington.txt', '1793-Washington.txt', ... + ['Fellow', '-', 'Citizens', 'of', 'the', 'Senate', ... + + ['1945-Truman.txt', '1946-Truman.txt', ... + ['PRESIDENT', 'HARRY', 'S', '.', 'TRUMAN', "'", ... + + ['firefox.txt', 'grail.txt', 'overheard.txt', ... + ['Cookie', 'Manager', ':', '"', 'Don', "'", 't', ... 
+ + +Tagged Corpus Reader +==================== +The Tagged Corpus reader can give us words, sentences, and paragraphs, +each tagged or untagged. All of the read methods can take one item +(in which case they return the contents of that file) or a list of +documents (in which case they concatenate the contents of those files). +By default, they apply to all documents in the corpus. + + >>> root = make_testcorpus( + ... a="""\ + ... This/det is/verb the/det first/adj sentence/noun ./punc + ... Here/det is/verb another/adj sentence/noun ./punc + ... Note/verb that/comp you/pron can/verb use/verb \ + ... any/noun tag/noun set/noun + ... + ... This/det is/verb the/det second/adj paragraph/noun ./punc + ... word/n without/adj a/det tag/noun :/: hello ./punc + ... """, + ... b="""\ + ... This/det is/verb the/det second/adj file/noun ./punc + ... """) + + >>> from nltk.corpus.reader.tagged import TaggedCorpusReader + >>> corpus = TaggedCorpusReader(root, list('ab')) + >>> corpus.fileids() + ['a', 'b'] + >>> str(corpus.root) == str(root) + True + >>> corpus.words() + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> corpus.sents() + [['This', 'is', 'the', 'first', ...], ['Here', 'is', 'another'...], ...] + >>> corpus.paras() + [[['This', ...], ['Here', ...], ...], [['This', ...], ...], ...] + >>> corpus.tagged_words() + [('This', 'DET'), ('is', 'VERB'), ('the', 'DET'), ...] + >>> corpus.tagged_sents() + [[('This', 'DET'), ('is', 'VERB'), ...], [('Here', 'DET'), ...], ...] + >>> corpus.tagged_paras() + [[[('This', 'DET'), ...], ...], [[('This', 'DET'), ...], ...], ...] 
+ >>> corpus.raw()[:40] + 'This/det is/verb the/det first/adj sente' + >>> len(corpus.words()), [len(corpus.words(d)) for d in corpus.fileids()] + (38, [32, 6]) + >>> len(corpus.sents()), [len(corpus.sents(d)) for d in corpus.fileids()] + (6, [5, 1]) + >>> len(corpus.paras()), [len(corpus.paras(d)) for d in corpus.fileids()] + (3, [2, 1]) + >>> print(corpus.words('a')) + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> print(corpus.words('b')) + ['This', 'is', 'the', 'second', 'file', '.'] + >>> del_testcorpus(root) + +The Brown Corpus uses the tagged corpus reader: + + >>> from nltk.corpus import brown + >>> brown.fileids() + ['ca01', 'ca02', 'ca03', 'ca04', 'ca05', 'ca06', 'ca07', ...] + >>> brown.categories() + ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', + 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction'] + >>> print(repr(brown.root).replace('\\\\','/')) + FileSystemPathPointer('.../corpora/brown') + >>> brown.words() + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> brown.sents() + [['The', 'Fulton', 'County', 'Grand', ...], ...] + >>> brown.paras() + [[['The', 'Fulton', 'County', ...]], [['The', 'jury', ...]], ...] + >>> brown.tagged_words() + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> brown.tagged_sents() + [[('The', 'AT'), ('Fulton', 'NP-TL'), ('County', 'NN-TL'), ...], ...] + >>> brown.tagged_paras() + [[[('The', 'AT'), ...]], [[('The', 'AT'), ...]], ...] + +Categorized Markdown Corpus Reader +================================== + +This corpus reader class provides additional methods to select features +present in markdown documents. + +First, let's make a test corpus: + + >>> root = make_testcorpus(ext='.md', + ... a="""\ + ... # Section One + ... Here's the first sentence of section one. Then the second sentence. + ... + ... First section, second paragraph. Let's add a [link](https://example.com). + ... + ... # Section Two + ... 
This section is more fun. It contains an ![image](https://example.com/image.png) followed by a list: + ... + ... 1. First list item + ... 2. Second list item + ... """, + ... b="""\ + ... This is the second file. It starts without a section, but then adds one. + ... + ... # Section 1 + ... This section has a sub-section! + ... + ... ## Section 1a + ... And here's a quote: + ... + ... > Carpe diem + ... + ... HTML tags are removed. + ... """) + +Now, import the ``CategorizedMarkdownCorpusReader`` class. + + >>> from nltk.corpus.reader.markdown import CategorizedMarkdownCorpusReader + +Note that this class requires the following Python packages: + +- ``markdown-it-py`` +- ``mdit-py-plugins`` +- ``mdit-plain`` + +The corpus provides usual methods like ``words()``, ``sents()``, +``paras()``, etc. Each of these methods accepts a list of file IDs +which can be a Python list or a comma-separated string. + + >>> corpus = CategorizedMarkdownCorpusReader(root, ['a.md', 'b.md']) + >>> corpus.fileids() + ['a.md', 'b.md'] + >>> corpus.words() + ['Section', 'One', 'Here', "'", 's', 'the', 'first', ...] + >>> corpus.words('b.md') + ['This', 'is', 'the', 'second', 'file', '.', 'It', ...] + >>> corpus.words('a.md, b.md') == corpus.words(['a.md', 'b.md']) + True + +Here are some methods specific to the +``CategorizedMarkdownCorpusReader`` class to retrieve markdown features: + + >>> corpus.links() + [Link(label='link', href='https://example.com', title=None)] + >>> corpus.images() + [Image(label='image', src='https://example.com/image.png', title=None)] + >>> corpus.lists() + [List(is_ordered=True, items=['First list item', 'Second list item'])] + >>> corpus.blockquotes() + [MarkdownBlock(content='Carpe diem')] + +The corpus can also be broken down into sections based on markdown headings: + + >>> corpus.sections('a.md') + [MarkdownSection(content='Section One\n\nHer...'), MarkdownSection(content='Section Two\n\nThi...')] + >>> for s in corpus.sections(): + ... 
print(F"{s.heading} (level {s.level})") + ... + Section One (level 1) + Section Two (level 1) + Section 1 (level 1) + Section 1a (level 2) + +Categories +---------- + +The ``CategorizedMarkdownCorpusReader`` relies on YAML front matter to +read metadata defined in markdown documents. This metadata is optional, +and may define one or more categories for each document. + +Let's create another test corpus, this time with some metadata: + + >>> del_testcorpus(root) + >>> root = make_testcorpus(ext='.md', + ... a="""\ + ... --- + ... tags: + ... - tag1 + ... - tag2 + ... --- + ... Document A: category metadata. + ... """, + ... b="""\ + ... --- + ... author: NLTK + ... tags: + ... - tag2 + ... - tag3 + ... --- + ... Document B: additional metadata. + ... """, + ... c="""\ + ... Document C: no metadata. + ... """) + +Load the new corpus and see the ``metadata()`` and ``categories()`` +methods in action: + + >>> fileids = ['a.md', 'b.md', 'c.md'] + >>> corpus = CategorizedMarkdownCorpusReader(root, fileids) + >>> corpus.metadata() + [{'tags': ['tag1', 'tag2']}, {'author': 'NLTK', 'tags': ['tag2', 'tag3']}] + >>> for fid in fileids: + ... print(fid, corpus.metadata(fid)) + ... 
+ a.md [{'tags': ['tag1', 'tag2']}] + b.md [{'author': 'NLTK', 'tags': ['tag2', 'tag3']}] + c.md [] + >>> corpus.categories() + ['tag1', 'tag2', 'tag3'] + >>> corpus.categories('a.md') + ['tag1', 'tag2'] + +The ``fileids()`` method also accepts categories and returns all file +IDs that match any of the specified categories: + + >>> corpus.fileids('tag2') + ['a.md', 'b.md'] + >>> del_testcorpus(root) + +Verbnet Corpus Reader +===================== + +Make sure we're picking up the right number of elements: + + >>> from nltk.corpus import verbnet + >>> len(verbnet.lemmas()) + 3621 + >>> len(verbnet.wordnetids()) + 4953 + >>> len(verbnet.classids()) + 429 + +Selecting classids based on various selectors: + + >>> verbnet.classids(lemma='take') + ['bring-11.3', 'characterize-29.2', 'convert-26.6.2', 'cost-54.2', + 'fit-54.3', 'performance-26.7-2', 'steal-10.5'] + >>> verbnet.classids(wordnetid='lead%2:38:01') + ['accompany-51.7'] + >>> verbnet.classids(fileid='approve-77.xml') + ['approve-77'] + >>> verbnet.classids(classid='admire-31.2') # subclasses + ['admire-31.2-1'] + +vnclass() accepts filenames, long ids, and short ids: + + >>> a = ElementTree.tostring(verbnet.vnclass('admire-31.2.xml')) + >>> b = ElementTree.tostring(verbnet.vnclass('admire-31.2')) + >>> c = ElementTree.tostring(verbnet.vnclass('31.2')) + >>> a == b == c + True + +fileids() can be used to get files based on verbnet class ids: + + >>> verbnet.fileids('admire-31.2') + ['admire-31.2.xml'] + >>> verbnet.fileids(['admire-31.2', 'obtain-13.5.2']) + ['admire-31.2.xml', 'obtain-13.5.2.xml'] + >>> verbnet.fileids('badidentifier') + Traceback (most recent call last): + . . . 
+ ValueError: vnclass identifier 'badidentifier' not found + +longid() and shortid() can be used to convert identifiers: + + >>> verbnet.longid('31.2') + 'admire-31.2' + >>> verbnet.longid('admire-31.2') + 'admire-31.2' + >>> verbnet.shortid('31.2') + '31.2' + >>> verbnet.shortid('admire-31.2') + '31.2' + >>> verbnet.longid('badidentifier') + Traceback (most recent call last): + . . . + ValueError: vnclass identifier 'badidentifier' not found + >>> verbnet.shortid('badidentifier') + Traceback (most recent call last): + . . . + ValueError: vnclass identifier 'badidentifier' not found + +Corpus View Regression Tests +============================ + +Select some corpus files to play with: + + >>> import nltk.data + >>> # A very short file (160 chars): + >>> f1 = nltk.data.find('corpora/inaugural/README') + >>> # A relatively short file (791 chars): + >>> f2 = nltk.data.find('corpora/inaugural/1793-Washington.txt') + >>> # A longer file (32k chars): + >>> f3 = nltk.data.find('corpora/inaugural/1909-Taft.txt') + >>> fileids = [f1, f2, f3] + + +Concatenation +------------- +Check that concatenation works as intended. + + >>> from nltk.corpus.reader.util import * + + >>> c1 = StreamBackedCorpusView(f1, read_whitespace_block, encoding='utf-8') + >>> c2 = StreamBackedCorpusView(f2, read_whitespace_block, encoding='utf-8') + >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block, encoding='utf-8') + >>> c123 = c1+c2+c3 + >>> print(c123) + ['C-Span', 'Inaugural', 'Address', 'Corpus', 'US', ...] + + >>> l1 = f1.open(encoding='utf-8').read().split() + >>> l2 = f2.open(encoding='utf-8').read().split() + >>> l3 = f3.open(encoding='utf-8').read().split() + >>> l123 = l1+l2+l3 + + >>> list(c123) == l123 + True + + >>> (c1+c2+c3)[100] == l123[100] + True + +Slicing +------- +First, do some tests with fairly small slices. These will all +generate tuple values. 
+ + >>> from nltk.util import LazySubsequence + >>> c1 = StreamBackedCorpusView(f1, read_whitespace_block, encoding='utf-8') + >>> l1 = f1.open(encoding='utf-8').read().split() + >>> print(len(c1)) + 21 + >>> len(c1) < LazySubsequence.MIN_SIZE + True + +Choose a list of indices, based on the length, that covers the +important corner cases: + + >>> indices = [-60, -30, -22, -21, -20, -1, + ... 0, 1, 10, 20, 21, 22, 30, 60] + +Test slicing with explicit start & stop value: + + >>> for s in indices: + ... for e in indices: + ... assert list(c1[s:e]) == l1[s:e] + +Test slicing with stop=None: + + >>> for s in indices: + ... assert list(c1[s:]) == l1[s:] + +Test slicing with start=None: + + >>> for e in indices: + ... assert list(c1[:e]) == l1[:e] + +Test slicing with start=stop=None: + + >>> list(c1[:]) == list(l1[:]) + True + +Next, we'll do some tests with much longer slices. These will +generate LazySubsequence objects. + + >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block, encoding='utf-8') + >>> l3 = f3.open(encoding='utf-8').read().split() + >>> print(len(c3)) + 5430 + >>> len(c3) > LazySubsequence.MIN_SIZE*2 + True + +Choose a list of indices, based on the length, that covers the +important corner cases: + + >>> indices = [-12000, -6000, -5431, -5430, -5429, -3000, -200, -1, + ... 0, 1, 200, 3000, 5000, 5429, 5430, 5431, 6000, 12000] + +Test slicing with explicit start & stop value: + + >>> for s in indices: + ... for e in indices: + ... assert list(c3[s:e]) == l3[s:e] + +Test slicing with stop=None: + + >>> for s in indices: + ... assert list(c3[s:]) == l3[s:] + +Test slicing with start=None: + + >>> for e in indices: + ... 
assert list(c3[:e]) == l3[:e]
+
+Test slicing with start=stop=None:
+
+ >>> list(c3[:]) == list(l3[:])
+ True
+
+Multiple Iterators
+------------------
+If multiple iterators are created for the same corpus view, their
+iteration can be interleaved:
+
+ >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block)
+ >>> iterators = [c3.iterate_from(n) for n in [0,15,30,45]]
+ >>> for i in range(15):
+ ... for iterator in iterators:
+ ... print('%-15s' % next(iterator), end=' ')
+ ... print()
+ My a duties in
+ fellow heavy of a
+ citizens: weight the proper
+ Anyone of office sense
+ who responsibility. upon of
+ has If which the
+ taken not, he obligation
+ the he is which
+ oath has about the
+ I no to oath
+ have conception enter, imposes.
+ just of or The
+ taken the he office
+ must powers is of
+ feel and lacking an
+
+SeekableUnicodeStreamReader
+===========================
+
+The file-like objects provided by the ``codecs`` module unfortunately
+suffer from a bug that prevents them from working correctly with
+corpus view objects. In particular, although they expose ``seek()``
+and ``tell()`` methods, those methods do not exhibit the expected
+behavior, because they are not synchronized with the internal buffers
+that are kept by the file-like objects. For example, the ``tell()``
+method will return the file position at the end of the buffers (whose
+contents have not yet been returned by the stream); and therefore this
+file position can not be used to return to the 'current' location in
+the stream (since ``seek()`` has no way to reconstruct the buffers).
+
+To get around these problems, we define a new class,
+`SeekableUnicodeStreamReader`, to act as a file-like interface to
+files containing encoded unicode data. This class is loosely based on
+the ``codecs.StreamReader`` class.
To construct a new reader, we call +the constructor with an underlying stream and an encoding name: + + >>> from io import StringIO, BytesIO + >>> from nltk.data import SeekableUnicodeStreamReader + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in ascii. + ... """.decode('ascii').encode('ascii')) + >>> reader = SeekableUnicodeStreamReader(stream, 'ascii') + +`SeekableUnicodeStreamReader`\ s support all of the normal operations +supplied by a read-only stream. Note that all of the read operations +return ``unicode`` objects (not ``str`` objects). + + >>> reader.read() # read the entire file. + 'This is a test file.\nIt is encoded in ascii.\n' + >>> reader.seek(0) # rewind to the start. + >>> reader.read(5) # read at most 5 bytes. + 'This ' + >>> reader.readline() # read to the end of the line. + 'is a test file.\n' + >>> reader.seek(0) # rewind to the start. + >>> for line in reader: + ... print(repr(line)) # iterate over lines + 'This is a test file.\n' + 'It is encoded in ascii.\n' + >>> reader.seek(0) # rewind to the start. + >>> reader.readlines() # read a list of line strings + ['This is a test file.\n', 'It is encoded in ascii.\n'] + >>> reader.close() + +Size argument to ``read()`` +--------------------------- +The ``size`` argument to ``read()`` specifies the maximum number of +*bytes* to read, not the maximum number of *characters*. Thus, for +encodings that use multiple bytes per character, it may return fewer +characters than the ``size`` argument: + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... """.decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.read(10) + 'This ' + +If a read block ends in the middle of the byte string encoding a +single character, then that byte string is stored in an internal +buffer, and re-used on the next call to ``read()``. 
However, if the +size argument is too small to read even a single character, even +though at least one character is available, then the ``read()`` method +will read additional bytes until it can return a single character. +This ensures that the ``read()`` method does not return an empty +string, which could be mistaken for indicating the end of the file. + + >>> reader.seek(0) # rewind to the start. + >>> reader.read(1) # we actually need to read 4 bytes + 'T' + >>> int(reader.tell()) + 4 + +The ``readline()`` method may read more than a single line of text, in +which case it stores the text that it does not return in a buffer. If +this buffer is not empty, then its contents will be included in the +value returned by the next call to ``read()``, regardless of the +``size`` argument, since they are available without reading any new +bytes from the stream: + + >>> reader.seek(0) # rewind to the start. + >>> reader.readline() # stores extra text in a buffer + 'This is a test file.\n' + >>> print(reader.linebuffer) # examine the buffer contents + ['It is encoded i'] + >>> reader.read(0) # returns the contents of the buffer + 'It is encoded i' + >>> print(reader.linebuffer) # examine the buffer contents + None + +Seek and Tell +------------- +In addition to these basic read operations, +`SeekableUnicodeStreamReader` also supports the ``seek()`` and +``tell()`` operations. However, some care must still be taken when +using these operations. In particular, the only file offsets that +should be passed to ``seek()`` are ``0`` and any offset that has been +returned by ``tell``. + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... """.decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.read(20) + 'This is a ' + >>> pos = reader.tell(); print(pos) + 22 + >>> reader.read(20) + 'test file.' + >>> reader.seek(pos) # rewind to the position from tell. 
+ >>> reader.read(20) + 'test file.' + +The ``seek()`` and ``tell()`` methods work property even when +``readline()`` is used. + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... """.decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.readline() + 'This is a test file.\n' + >>> pos = reader.tell(); print(pos) + 44 + >>> reader.readline() + 'It is encoded in utf-16.\n' + >>> reader.seek(pos) # rewind to the position from tell. + >>> reader.readline() + 'It is encoded in utf-16.\n' + + +Squashed Bugs +============= + +svn 5276 fixed a bug in the comment-stripping behavior of +parse_sexpr_block. + + >>> from io import StringIO + >>> from nltk.corpus.reader.util import read_sexpr_block + >>> f = StringIO(b""" + ... (a b c) + ... # This line is a comment. + ... (d e f\ng h)""".decode('ascii')) + >>> print(read_sexpr_block(f, block_size=38, comment_char='#')) + ['(a b c)'] + >>> print(read_sexpr_block(f, block_size=38, comment_char='#')) + ['(d e f\ng h)'] + +svn 5277 fixed a bug in parse_sexpr_block, which would cause it to +enter an infinite loop if a file ended mid-sexpr, or ended with a +token that was not followed by whitespace. A related bug caused +an infinite loop if the corpus ended in an unmatched close paren -- +this was fixed in svn 5279 + + >>> f = StringIO(b""" + ... This file ends mid-sexpr + ... 
(hello (world""".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['This', 'file', 'ends', 'mid-sexpr'] + ['(hello (world'] + [] + + >>> f = StringIO(b"This file has no trailing whitespace.".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['This', 'file', 'has', 'no', 'trailing'] + ['whitespace.'] + [] + + >>> # Bug fixed in 5279: + >>> f = StringIO(b"a b c)".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['a', 'b'] + ['c)'] + [] + + +svn 5624 & 5265 fixed a bug in ConcatenatedCorpusView, which caused it +to return the wrong items when indexed starting at any index beyond +the first file. + + >>> import nltk + >>> sents = nltk.corpus.brown.sents() + >>> print(sents[6000]) + ['Cholesterol', 'and', 'thyroid'] + >>> print(sents[6000]) + ['Cholesterol', 'and', 'thyroid'] + +svn 5728 fixed a bug in Categorized*CorpusReader, which caused them +to return words from *all* files when just one file was specified. + + >>> from nltk.corpus import reuters + >>> reuters.words('training/13085') + ['SNYDER', '&', 'lt', ';', 'SOI', '>', 'MAKES', ...] + >>> reuters.words('training/5082') + ['SHEPPARD', 'RESOURCES', 'TO', 'MERGE', 'WITH', ...] + +svn 7227 fixed a bug in the qc corpus reader, which prevented +access to its tuples() method + + >>> from nltk.corpus import qc + >>> qc.tuples('test.txt') + [('NUM:dist', 'How far is it from Denver to Aspen ?'), ('LOC:city', 'What county is Modesto , California in ?'), ...] + +Ensure that KEYWORD from `comparative_sents.py` no longer contains a ReDoS vulnerability. + + >>> import re + >>> import time + >>> from nltk.corpus.reader.comparative_sents import KEYWORD + >>> sizes = { + ... "short": 4000, + ... "long": 40000 + ... } + >>> exec_times = { + ... "short": [], + ... "long": [], + ... } + >>> for size_name, size in sizes.items(): + ... for j in range(9): + ... start_t = time.perf_counter() + ... payload = "( " + "(" * size + ... output = KEYWORD.findall(payload) + ... 
exec_times[size_name].append(time.perf_counter() - start_t) + ... exec_times[size_name] = sorted(exec_times[size_name])[4] # Get the median + +Ideally, the execution time of such a regular expression is linear +in the length of the input. As such, we would expect exec_times["long"] +to be roughly 10 times as big as exec_times["short"]. +With the ReDoS in place, it took roughly 80 times as long. +For now, we accept values below 30 (times as long), due to the potential +for variance. This ensures that the ReDoS has certainly been reduced, +if not removed. + + >>> exec_times["long"] / exec_times["short"] < 30 # doctest: +SKIP + True diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/crubadan.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/crubadan.doctest new file mode 100644 index 0000000000000000000000000000000000000000..088ee23c579ec632da2532ef1d3a93d0bae72f89 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/crubadan.doctest @@ -0,0 +1,65 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +Crubadan Corpus Reader +====================== + +Crubadan is an NLTK corpus reader for ngram files provided +by the Crubadan project. It supports several languages. + + >>> from nltk.corpus import crubadan + >>> crubadan.langs() + ['abk', 'abn',..., 'zpa', 'zul'] + +---------------------------------------- +Language code mapping and helper methods +---------------------------------------- + +The web crawler that generates the 3-gram frequencies works at the +level of "writing systems" rather than languages. Writing systems +are assigned internal 2-3 letter codes that require mapping to the +standard ISO 639-3 codes. For more information, please refer to +the README in nltk_data/crubadan folder after installing it. 
+ +To translate ISO 639-3 codes to "Crubadan Code": + + >>> crubadan.iso_to_crubadan('eng') + 'en' + >>> crubadan.iso_to_crubadan('fra') + 'fr' + >>> crubadan.iso_to_crubadan('aaa') + +In reverse, print ISO 639-3 code if we have the Crubadan Code: + + >>> crubadan.crubadan_to_iso('en') + 'eng' + >>> crubadan.crubadan_to_iso('fr') + 'fra' + >>> crubadan.crubadan_to_iso('aa') + +--------------------------- +Accessing ngram frequencies +--------------------------- + +On initialization the reader will create a dictionary of every +language supported by the Crubadan project, mapping the ISO 639-3 +language code to its corresponding ngram frequency. + +You can access individual language FreqDist and the ngrams within them as follows: + + >>> english_fd = crubadan.lang_freq('eng') + >>> english_fd['the'] + 728135 + +Above accesses the FreqDist of English and returns the frequency of the ngram 'the'. +A ngram that isn't found within the language will return 0: + + >>> english_fd['sometest'] + 0 + +A language that isn't supported will raise an exception: + + >>> crubadan.lang_freq('elvish') + Traceback (most recent call last): + ... + RuntimeError: Unsupported language. diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/framenet.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/framenet.doctest new file mode 100644 index 0000000000000000000000000000000000000000..dc28d2c0fa5ca339bfe8d426e74e8211b7fad1d5 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/framenet.doctest @@ -0,0 +1,288 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +======== +FrameNet +======== + +The FrameNet corpus is a lexical database of English that is both human- +and machine-readable, based on annotating examples of how words are used +in actual texts. FrameNet is based on a theory of meaning called Frame +Semantics, deriving from the work of Charles J. Fillmore and colleagues. 
+The basic idea is straightforward: that the meanings of most words can +best be understood on the basis of a semantic frame: a description of a +type of event, relation, or entity and the participants in it. For +example, the concept of cooking typically involves a person doing the +cooking (Cook), the food that is to be cooked (Food), something to hold +the food while cooking (Container) and a source of heat +(Heating_instrument). In the FrameNet project, this is represented as a +frame called Apply_heat, and the Cook, Food, Heating_instrument and +Container are called frame elements (FEs). Words that evoke this frame, +such as fry, bake, boil, and broil, are called lexical units (LUs) of +the Apply_heat frame. The job of FrameNet is to define the frames +and to annotate sentences to show how the FEs fit syntactically around +the word that evokes the frame. + +------ +Frames +------ + +A Frame is a script-like conceptual structure that describes a +particular type of situation, object, or event along with the +participants and props that are needed for that Frame. For +example, the "Apply_heat" frame describes a common situation +involving a Cook, some Food, and a Heating_Instrument, and is +evoked by words such as bake, blanch, boil, broil, brown, +simmer, steam, etc. + +We call the roles of a Frame "frame elements" (FEs) and the +frame-evoking words are called "lexical units" (LUs). + +FrameNet includes relations between Frames. Several types of +relations are defined, of which the most important are: + +- Inheritance: An IS-A relation. The child frame is a subtype + of the parent frame, and each FE in the parent is bound to + a corresponding FE in the child. An example is the + "Revenge" frame which inherits from the + "Rewards_and_punishments" frame. + +- Using: The child frame presupposes the parent frame as + background, e.g the "Speed" frame "uses" (or presupposes) + the "Motion" frame; however, not all parent FEs need to be + bound to child FEs. 
+ +- Subframe: The child frame is a subevent of a complex event + represented by the parent, e.g. the "Criminal_process" frame + has subframes of "Arrest", "Arraignment", "Trial", and + "Sentencing". + +- Perspective_on: The child frame provides a particular + perspective on an un-perspectivized parent frame. A pair of + examples consists of the "Hiring" and "Get_a_job" frames, + which perspectivize the "Employment_start" frame from the + Employer's and the Employee's point of view, respectively. + +To get a list of all of the Frames in FrameNet, you can use the +`frames()` function. If you supply a regular expression pattern to the +`frames()` function, you will get a list of all Frames whose names match +that pattern: + + >>> from pprint import pprint + >>> from operator import itemgetter + >>> from nltk.corpus import framenet as fn + >>> from nltk.corpus.reader.framenet import PrettyList + >>> x = fn.frames(r'(?i)crim') + >>> x.sort(key=itemgetter('ID')) + >>> x + [, , ...] + >>> PrettyList(sorted(x, key=itemgetter('ID'))) + [, , ...] + +To get the details of a particular Frame, you can use the `frame()` +function passing in the frame number: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame(202) + >>> f.ID + 202 + >>> f.name + 'Arrest' + >>> f.definition + "Authorities charge a Suspect, who is under suspicion of having committed a crime..." + >>> len(f.lexUnit) + 11 + >>> pprint(sorted([x for x in f.FE])) + ['Authorities', + 'Charges', + 'Co-participant', + 'Manner', + 'Means', + 'Offense', + 'Place', + 'Purpose', + 'Source_of_legal_authority', + 'Suspect', + 'Time', + 'Type'] + >>> pprint(f.frameRelations) + [ Child=Arrest>, Component=Arrest>, ...] + +The `frame()` function shown above returns a dict object containing +detailed information about the Frame. See the documentation on the +`frame()` function for the specifics. + +You can also search for Frames by their Lexical Units (LUs). 
The +`frames_by_lemma()` function returns a list of all frames that contain +LUs in which the 'name' attribute of the LU matches the given regular +expression. Note that LU names are composed of "lemma.POS", where the +"lemma" part can be made up of either a single lexeme (e.g. 'run') or +multiple lexemes (e.g. 'a little') (see below). + + >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID'))) + [, ] + +------------- +Lexical Units +------------- + +A lexical unit (LU) is a pairing of a word with a meaning. For +example, the "Apply_heat" Frame describes a common situation +involving a Cook, some Food, and a Heating Instrument, and is +_evoked_ by words such as bake, blanch, boil, broil, brown, +simmer, steam, etc. These frame-evoking words are the LUs in the +Apply_heat frame. Each sense of a polysemous word is a different +LU. + +We have used the word "word" in talking about LUs. The reality +is actually rather complex. When we say that the word "bake" is +polysemous, we mean that the lemma "bake.v" (which has the +word-forms "bake", "bakes", "baked", and "baking") is linked to +three different frames: + +- Apply_heat: "Michelle baked the potatoes for 45 minutes." + +- Cooking_creation: "Michelle baked her mother a cake for her birthday." + +- Absorb_heat: "The potatoes have to bake for more than 30 minutes." + +These constitute three different LUs, with different +definitions. + +Multiword expressions such as "given name" and hyphenated words +like "shut-eye" can also be LUs. Idiomatic phrases such as +"middle of nowhere" and "give the slip (to)" are also defined as +LUs in the appropriate frames ("Isolated_places" and "Evading", +respectively), and their internal structure is not analyzed. + +Framenet provides multiple annotated examples of each sense of a +word (i.e. each LU). Moreover, the set of examples +(approximately 20 per LU) illustrates all of the combinatorial +possibilities of the lexical unit. 
+ +Each LU is linked to a Frame, and hence to the other words which +evoke that Frame. This makes the FrameNet database similar to a +thesaurus, grouping together semantically similar words. + +In the simplest case, frame-evoking words are verbs such as +"fried" in: + + "Matilde fried the catfish in a heavy iron skillet." + +Sometimes event nouns may evoke a Frame. For example, +"reduction" evokes "Cause_change_of_scalar_position" in: + + "...the reduction of debt levels to $665 million from $2.6 billion." + +Adjectives may also evoke a Frame. For example, "asleep" may +evoke the "Sleep" frame as in: + + "They were asleep for hours." + +Many common nouns, such as artifacts like "hat" or "tower", +typically serve as dependents rather than clearly evoking their +own frames. + +Details for a specific lexical unit can be obtained using this class's +`lus()` function, which takes an optional regular expression +pattern that will be matched against the name of the lexical unit: + + >>> from pprint import pprint + >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID'))) + [, , ...] + +You can obtain detailed information on a particular LU by calling the +`lu()` function and passing in an LU's 'ID' number: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> fn.lu(256).name + 'foresee.v' + >>> fn.lu(256).definition + 'COD: be aware of beforehand; predict.' + >>> fn.lu(256).frame.name + 'Expectation' + >>> fn.lu(256).lexemes[0].name + 'foresee' + +Note that LU names take the form of a dotted string (e.g. "run.v" or "a +little.adv") in which a lemma precedes the "." and a part of speech +(POS) follows the dot. The lemma may be composed of a single lexeme +(e.g. "run") or of multiple lexemes (e.g. "a little"). 
The list of +POSs used in the LUs is: + +v - verb +n - noun +a - adjective +adv - adverb +prep - preposition +num - numbers +intj - interjection +art - article +c - conjunction +scon - subordinating conjunction + +For more detailed information about the info that is contained in the +dict that is returned by the `lu()` function, see the documentation on +the `lu()` function. + +------------------- +Annotated Documents +------------------- + +The FrameNet corpus contains a small set of annotated documents. A list +of these documents can be obtained by calling the `docs()` function: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> d = fn.docs('BellRinging')[0] + >>> d.corpname + 'PropBank' + >>> d.sentence[49] + full-text sentence (...) in BellRinging: + + + [POS] 17 tags + + [POS_tagset] PENN + + [text] + [annotationSet] + + `` I live in hopes that the ringers themselves will be drawn into + ***** ******* ***** + Desir Cause_t Cause + [1] [3] [2] + + that fuller life . + ****** + Comple + [4] + (Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness) + + + >>> d.sentence[49].annotationSet[1] + annotation set (...): + + [status] MANUAL + + [LU] (6605) hope.n in Desiring + + [frame] (366) Desiring + + [GF] 2 relations + + [PT] 2 phrases + + [text] + [Target] + [FE] + [Noun] + + `` I live in hopes that the ringers themselves will be drawn into + - ^^^^ ^^ ***** ---------------------------------------------- + E supp su Event + + that fuller life . + ----------------- + + (E=Experiencer, su=supp) + + diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/generate.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/generate.doctest new file mode 100644 index 0000000000000000000000000000000000000000..414d3bb7265bf08351b8d2b6f09e46fda6b1d676 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/generate.doctest @@ -0,0 +1,78 @@ +.. 
Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +=============================================== +Generating sentences from context-free grammars +=============================================== + +An example grammar: + + >>> from nltk.parse.generate import generate, demo_grammar + >>> from nltk import CFG + >>> grammar = CFG.fromstring(demo_grammar) + >>> print(grammar) + Grammar with 13 productions (start state = S) + S -> NP VP + NP -> Det N + PP -> P NP + VP -> 'slept' + VP -> 'saw' NP + VP -> 'walked' PP + Det -> 'the' + Det -> 'a' + N -> 'man' + N -> 'park' + N -> 'dog' + P -> 'in' + P -> 'with' + +The first 10 generated sentences: + + >>> for sentence in generate(grammar, n=10): + ... print(' '.join(sentence)) + the man slept + the man saw the man + the man saw the park + the man saw the dog + the man saw a man + the man saw a park + the man saw a dog + the man walked in the man + the man walked in the park + the man walked in the dog + +All sentences of max depth 4: + + >>> for sentence in generate(grammar, depth=4): + ... print(' '.join(sentence)) + the man slept + the park slept + the dog slept + a man slept + a park slept + a dog slept + +The number of sentences of different max depths: + + >>> len(list(generate(grammar, depth=3))) + 0 + >>> len(list(generate(grammar, depth=4))) + 6 + >>> len(list(generate(grammar, depth=5))) + 42 + >>> len(list(generate(grammar, depth=6))) + 114 + >>> len(list(generate(grammar))) + 114 + +Infinite grammars will throw a RecursionError when not bounded by some ``depth``: + + >>> grammar = CFG.fromstring(""" + ... S -> A B + ... A -> B + ... B -> "b" | A + ... """) + >>> list(generate(grammar)) + Traceback (most recent call last): + ... + RuntimeError: The grammar has rule(s) that yield infinite recursion! 
diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/logic.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/logic.doctest new file mode 100644 index 0000000000000000000000000000000000000000..85256e311e77ba0c8c889b48c5c27ad3e139de25 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/logic.doctest @@ -0,0 +1,1096 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +======================= +Logic & Lambda Calculus +======================= + +The `nltk.logic` package allows expressions of First-Order Logic (FOL) to be +parsed into ``Expression`` objects. In addition to FOL, the parser +handles lambda-abstraction with variables of higher order. + +-------- +Overview +-------- + + >>> from nltk.sem.logic import * + +The default inventory of logical constants is the following: + + >>> boolean_ops() + negation - + conjunction & + disjunction | + implication -> + equivalence <-> + >>> equality_preds() + equality = + inequality != + >>> binding_ops() + existential exists + universal all + lambda \ + +---------------- +Regression Tests +---------------- + + +Untyped Logic ++++++++++++++ + +Process logical expressions conveniently: + + >>> read_expr = Expression.fromstring + +Test for equality under alpha-conversion +======================================== + + >>> e1 = read_expr('exists x.P(x)') + >>> print(e1) + exists x.P(x) + >>> e2 = e1.alpha_convert(Variable('z')) + >>> print(e2) + exists z.P(z) + >>> e1 == e2 + True + + + >>> l = read_expr(r'\X.\X.X(X)(1)').simplify() + >>> id = read_expr(r'\X.X(X)') + >>> l == id + True + +Test numerals +============= + + >>> zero = read_expr(r'\F x.x') + >>> one = read_expr(r'\F x.F(x)') + >>> two = read_expr(r'\F x.F(F(x))') + >>> three = read_expr(r'\F x.F(F(F(x)))') + >>> four = read_expr(r'\F x.F(F(F(F(x))))') + >>> succ = read_expr(r'\N F x.F(N(F,x))') + >>> plus = read_expr(r'\M N F x.M(F,N(F,x))') + >>> mult = read_expr(r'\M N F.M(N(F))') + >>> 
pred = read_expr(r'\N F x.(N(\G H.H(G(F)))(\u.x)(\u.u))') + >>> v1 = ApplicationExpression(succ, zero).simplify() + >>> v1 == one + True + >>> v2 = ApplicationExpression(succ, v1).simplify() + >>> v2 == two + True + >>> v3 = ApplicationExpression(ApplicationExpression(plus, v1), v2).simplify() + >>> v3 == three + True + >>> v4 = ApplicationExpression(ApplicationExpression(mult, v2), v2).simplify() + >>> v4 == four + True + >>> v5 = ApplicationExpression(pred, ApplicationExpression(pred, v4)).simplify() + >>> v5 == two + True + +Overloaded operators also exist, for convenience. + + >>> print(succ(zero).simplify() == one) + True + >>> print(plus(one,two).simplify() == three) + True + >>> print(mult(two,two).simplify() == four) + True + >>> print(pred(pred(four)).simplify() == two) + True + + >>> john = read_expr(r'john') + >>> man = read_expr(r'\x.man(x)') + >>> walk = read_expr(r'\x.walk(x)') + >>> man(john).simplify() + + >>> print(-walk(john).simplify()) + -walk(john) + >>> print((man(john) & walk(john)).simplify()) + (man(john) & walk(john)) + >>> print((man(john) | walk(john)).simplify()) + (man(john) | walk(john)) + >>> print((man(john) > walk(john)).simplify()) + (man(john) -> walk(john)) + >>> print((man(john) < walk(john)).simplify()) + (man(john) <-> walk(john)) + +Python's built-in lambda operator can also be used with Expressions + + >>> john = VariableExpression(Variable('john')) + >>> run_var = VariableExpression(Variable('run')) + >>> run = lambda x: run_var(x) + >>> run(john) + + + +``betaConversionTestSuite.pl`` +------------------------------ + +Tests based on Blackburn & Bos' book, *Representation and Inference +for Natural Language*. 
+ + >>> x1 = read_expr(r'\P.P(mia)(\x.walk(x))').simplify() + >>> x2 = read_expr(r'walk(mia)').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'exists x.(man(x) & ((\P.exists x.(woman(x) & P(x)))(\y.love(x,y))))').simplify() + >>> x2 = read_expr(r'exists x.(man(x) & exists y.(woman(y) & love(x,y)))').simplify() + >>> x1 == x2 + True + >>> x1 = read_expr(r'\a.sleep(a)(mia)').simplify() + >>> x2 = read_expr(r'sleep(mia)').simplify() + >>> x1 == x2 + True + >>> x1 = read_expr(r'\a.\b.like(b,a)(mia)').simplify() + >>> x2 = read_expr(r'\b.like(b,mia)').simplify() + >>> x1 == x2 + True + >>> x1 = read_expr(r'\a.(\b.like(b,a)(vincent))').simplify() + >>> x2 = read_expr(r'\a.like(vincent,a)').simplify() + >>> x1 == x2 + True + >>> x1 = read_expr(r'\a.((\b.like(b,a)(vincent)) & sleep(a))').simplify() + >>> x2 = read_expr(r'\a.(like(vincent,a) & sleep(a))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\a.\b.like(b,a)(mia)(vincent))').simplify() + >>> x2 = read_expr(r'like(vincent,mia)').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'P((\a.sleep(a)(vincent)))').simplify() + >>> x2 = read_expr(r'P(sleep(vincent))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'\A.A((\b.sleep(b)(vincent)))').simplify() + >>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'\A.A(sleep(vincent))').simplify() + >>> x2 = read_expr(r'\A.A(sleep(vincent))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\A.A(vincent)(\b.sleep(b)))').simplify() + >>> x2 = read_expr(r'sleep(vincent)').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'\A.believe(mia,A(vincent))(\b.sleep(b))').simplify() + >>> x2 = read_expr(r'believe(mia,sleep(vincent))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\A.(A(vincent) & A(mia)))(\b.sleep(b))').simplify() + >>> x2 = read_expr(r'(sleep(vincent) & sleep(mia))').simplify() + >>> x1 == x2 + True + + >>> x1 = 
read_expr(r'\A.\B.(\C.C(A(vincent))(\d.probably(d)) & (\C.C(B(mia))(\d.improbably(d))))(\f.walk(f))(\f.talk(f))').simplify() + >>> x2 = read_expr(r'(probably(walk(vincent)) & improbably(talk(mia)))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\d.\f.love(d,f))))(jules)(mia)').simplify() + >>> x2 = read_expr(r'love(jules,mia)').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\A.\B.exists c.(A(c) & B(c)))(\d.boxer(d),\d.sleep(d))').simplify() + >>> x2 = read_expr(r'exists c.(boxer(c) & sleep(c))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'\A.Z(A)(\c.\a.like(a,c))').simplify() + >>> x2 = read_expr(r'Z(\c.\a.like(a,c))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'\A.\b.A(b)(\c.\b.like(b,c))').simplify() + >>> x2 = read_expr(r'\b.(\c.\b.like(b,c)(b))').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\a.\b.(\C.C(a,b)(\b.\a.loves(b,a))))(jules)(mia)').simplify() + >>> x2 = read_expr(r'loves(jules,mia)').simplify() + >>> x1 == x2 + True + + >>> x1 = read_expr(r'(\A.\b.(exists b.A(b) & A(b)))(\c.boxer(c))(vincent)').simplify() + >>> x2 = read_expr(r'((exists b.boxer(b)) & boxer(vincent))').simplify() + >>> x1 == x2 + True + +Test Parser +=========== + + >>> print(read_expr(r'john')) + john + >>> print(read_expr(r'x')) + x + >>> print(read_expr(r'-man(x)')) + -man(x) + >>> print(read_expr(r'--man(x)')) + --man(x) + >>> print(read_expr(r'(man(x))')) + man(x) + >>> print(read_expr(r'((man(x)))')) + man(x) + >>> print(read_expr(r'man(x) <-> tall(x)')) + (man(x) <-> tall(x)) + >>> print(read_expr(r'(man(x) <-> tall(x))')) + (man(x) <-> tall(x)) + >>> print(read_expr(r'(man(x) & tall(x) & walks(x))')) + (man(x) & tall(x) & walks(x)) + >>> print(read_expr(r'(man(x) & tall(x) & walks(x))').first) + (man(x) & tall(x)) + >>> print(read_expr(r'man(x) | tall(x) & walks(x)')) + (man(x) | (tall(x) & walks(x))) + >>> print(read_expr(r'((man(x) & tall(x)) | walks(x))')) + ((man(x) & tall(x)) | walks(x)) + 
>>> print(read_expr(r'man(x) & (tall(x) | walks(x))')) + (man(x) & (tall(x) | walks(x))) + >>> print(read_expr(r'(man(x) & (tall(x) | walks(x)))')) + (man(x) & (tall(x) | walks(x))) + >>> print(read_expr(r'P(x) -> Q(x) <-> R(x) | S(x) & T(x)')) + ((P(x) -> Q(x)) <-> (R(x) | (S(x) & T(x)))) + >>> print(read_expr(r'exists x.man(x)')) + exists x.man(x) + >>> print(read_expr(r'exists x.(man(x) & tall(x))')) + exists x.(man(x) & tall(x)) + >>> print(read_expr(r'exists x.(man(x) & tall(x) & walks(x))')) + exists x.(man(x) & tall(x) & walks(x)) + >>> print(read_expr(r'-P(x) & Q(x)')) + (-P(x) & Q(x)) + >>> read_expr(r'-P(x) & Q(x)') == read_expr(r'(-P(x)) & Q(x)') + True + >>> print(read_expr(r'\x.man(x)')) + \x.man(x) + >>> print(read_expr(r'\x.man(x)(john)')) + \x.man(x)(john) + >>> print(read_expr(r'\x.man(x)(john) & tall(x)')) + (\x.man(x)(john) & tall(x)) + >>> print(read_expr(r'\x.\y.sees(x,y)')) + \x y.sees(x,y) + >>> print(read_expr(r'\x y.sees(x,y)')) + \x y.sees(x,y) + >>> print(read_expr(r'\x.\y.sees(x,y)(a)')) + (\x y.sees(x,y))(a) + >>> print(read_expr(r'\x y.sees(x,y)(a)')) + (\x y.sees(x,y))(a) + >>> print(read_expr(r'\x.\y.sees(x,y)(a)(b)')) + ((\x y.sees(x,y))(a))(b) + >>> print(read_expr(r'\x y.sees(x,y)(a)(b)')) + ((\x y.sees(x,y))(a))(b) + >>> print(read_expr(r'\x.\y.sees(x,y)(a,b)')) + ((\x y.sees(x,y))(a))(b) + >>> print(read_expr(r'\x y.sees(x,y)(a,b)')) + ((\x y.sees(x,y))(a))(b) + >>> print(read_expr(r'((\x.\y.sees(x,y))(a))(b)')) + ((\x y.sees(x,y))(a))(b) + >>> print(read_expr(r'P(x)(y)(z)')) + P(x,y,z) + >>> print(read_expr(r'P(Q)')) + P(Q) + >>> print(read_expr(r'P(Q(x))')) + P(Q(x)) + >>> print(read_expr(r'(\x.exists y.walks(x,y))(x)')) + (\x.exists y.walks(x,y))(x) + >>> print(read_expr(r'exists x.(x = john)')) + exists x.(x = john) + >>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))')) + ((\P Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x)) + >>> a = read_expr(r'exists c.exists b.A(b,c) & A(b,c)') + >>> b = 
read_expr(r'(exists c.(exists b.A(b,c))) & A(b,c)') + >>> print(a == b) + True + >>> a = read_expr(r'exists c.(exists b.A(b,c) & A(b,c))') + >>> b = read_expr(r'exists c.((exists b.A(b,c)) & A(b,c))') + >>> print(a == b) + True + >>> print(read_expr(r'exists x.x = y')) + exists x.(x = y) + >>> print(read_expr('A(B)(C)')) + A(B,C) + >>> print(read_expr('(A(B))(C)')) + A(B,C) + >>> print(read_expr('A((B)(C))')) + A(B(C)) + >>> print(read_expr('A(B(C))')) + A(B(C)) + >>> print(read_expr('(A)(B(C))')) + A(B(C)) + >>> print(read_expr('(((A)))(((B))(((C))))')) + A(B(C)) + >>> print(read_expr(r'A != B')) + -(A = B) + >>> print(read_expr('P(x) & x=y & P(y)')) + (P(x) & (x = y) & P(y)) + >>> try: print(read_expr(r'\walk.walk(x)')) + ... except LogicalExpressionException as e: print(e) + 'walk' is an illegal variable name. Constants may not be abstracted. + \walk.walk(x) + ^ + >>> try: print(read_expr(r'all walk.walk(john)')) + ... except LogicalExpressionException as e: print(e) + 'walk' is an illegal variable name. Constants may not be quantified. + all walk.walk(john) + ^ + >>> try: print(read_expr(r'x(john)')) + ... except LogicalExpressionException as e: print(e) + 'x' is an illegal predicate name. Individual variables may not be used as predicates. 
+ x(john) + ^ + + >>> from nltk.sem.logic import LogicParser # hack to give access to custom quote chars + >>> lpq = LogicParser() + >>> lpq.quote_chars = [("'", "'", "\\", False)] + >>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )")) + (man(x) & tall's,(x) & walks(x)) + >>> lpq.quote_chars = [("'", "'", "\\", True)] + >>> print(lpq.parse(r"'tall\'s,'")) + 'tall\'s,' + >>> print(lpq.parse(r"'spaced name(x)'")) + 'spaced name(x)' + >>> print(lpq.parse(r"-'tall\'s,'(x)")) + -'tall\'s,'(x) + >>> print(lpq.parse(r"(man(x) & 'tall\'s,' (x) & walks (x) )")) + (man(x) & 'tall\'s,'(x) & walks(x)) + + +Simplify +======== + + >>> print(read_expr(r'\x.man(x)(john)').simplify()) + man(john) + >>> print(read_expr(r'\x.((man(x)))(john)').simplify()) + man(john) + >>> print(read_expr(r'\x.\y.sees(x,y)(john, mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'\x y.sees(x,y)(john, mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'\x.\y.sees(x,y)(john)(mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'\x y.sees(x,y)(john)(mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'\x.\y.sees(x,y)(john)').simplify()) + \y.sees(john,y) + >>> print(read_expr(r'\x y.sees(x,y)(john)').simplify()) + \y.sees(john,y) + >>> print(read_expr(r'(\x.\y.sees(x,y)(john))(mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'(\x y.sees(x,y)(john))(mary)').simplify()) + sees(john,mary) + >>> print(read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(x))').simplify()) + exists x.(man(x) & exists y.walks(x,y)) + >>> e1 = read_expr(r'exists x.(man(x) & (\x.exists y.walks(x,y))(y))').simplify() + >>> e2 = read_expr(r'exists x.(man(x) & exists z1.walks(y,z1))') + >>> e1 == e2 + True + >>> print(read_expr(r'(\P Q.exists x.(P(x) & Q(x)))(\x.dog(x))').simplify()) + \Q.exists x.(dog(x) & Q(x)) + >>> print(read_expr(r'((\P.\Q.exists x.(P(x) & Q(x)))(\x.dog(x)))(\x.bark(x))').simplify()) + exists x.(dog(x) & bark(x)) + >>> 
print(read_expr(r'\P.(P(x)(y))(\a b.Q(a,b))').simplify()) + Q(x,y) + +Replace +======= + + >>> a = read_expr(r'a') + >>> x = read_expr(r'x') + >>> y = read_expr(r'y') + >>> z = read_expr(r'z') + + >>> print(read_expr(r'man(x)').replace(x.variable, a, False)) + man(a) + >>> print(read_expr(r'(man(x) & tall(x))').replace(x.variable, a, False)) + (man(a) & tall(a)) + >>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, False)) + exists x.man(x) + >>> print(read_expr(r'exists x.man(x)').replace(x.variable, a, True)) + exists a.man(a) + >>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, False)) + exists x.give(x,a,z) + >>> print(read_expr(r'exists x.give(x,y,z)').replace(y.variable, a, True)) + exists x.give(x,a,z) + >>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, False) + >>> e2 = read_expr(r'exists z1.give(z1,x,z)') + >>> e1 == e2 + True + >>> e1 = read_expr(r'exists x.give(x,y,z)').replace(y.variable, x, True) + >>> e2 = read_expr(r'exists z1.give(z1,x,z)') + >>> e1 == e2 + True + >>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, False)) + \x y z.give(x,y,z) + >>> print(read_expr(r'\x y z.give(x,y,z)').replace(y.variable, a, True)) + \x a z.give(x,a,z) + >>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, False)) + \x y.give(x,y,a) + >>> print(read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, a, True)) + \x y.give(x,y,a) + >>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, False) + >>> e2 = read_expr(r'\z1.\y.give(z1,y,x)') + >>> e1 == e2 + True + >>> e1 = read_expr(r'\x.\y.give(x,y,z)').replace(z.variable, x, True) + >>> e2 = read_expr(r'\z1.\y.give(z1,y,x)') + >>> e1 == e2 + True + >>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, False)) + \x.give(x,y,y) + >>> print(read_expr(r'\x.give(x,y,z)').replace(z.variable, y, True)) + \x.give(x,y,y) + + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + >>> e1 = read_expr('e1') + >>> e2 = read_expr('e2') + 
>>> print(read_expr('exists e1 e2.(walk(e1) & talk(e2))').replace(e1.variable, e2, True)) + exists e2 e01.(walk(e2) & talk(e01)) + + +Variables / Free +================ + + >>> examples = [r'walk(john)', + ... r'walk(x)', + ... r'?vp(?np)', + ... r'see(john,mary)', + ... r'exists x.walk(x)', + ... r'\x.see(john,x)', + ... r'\x.see(john,x)(mary)', + ... r'P(x)', + ... r'\P.P(x)', + ... r'aa(x,bb(y),cc(z),P(w),u)', + ... r'bo(?det(?n),@x)'] + >>> examples = [read_expr(e) for e in examples] + + >>> for e in examples: + ... print('%-25s' % e, sorted(e.free())) + walk(john) [] + walk(x) [Variable('x')] + ?vp(?np) [] + see(john,mary) [] + exists x.walk(x) [] + \x.see(john,x) [] + (\x.see(john,x))(mary) [] + P(x) [Variable('P'), Variable('x')] + \P.P(x) [Variable('x')] + aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')] + bo(?det(?n),@x) [] + + >>> for e in examples: + ... print('%-25s' % e, sorted(e.constants())) + walk(john) [Variable('john')] + walk(x) [] + ?vp(?np) [Variable('?np')] + see(john,mary) [Variable('john'), Variable('mary')] + exists x.walk(x) [] + \x.see(john,x) [Variable('john')] + (\x.see(john,x))(mary) [Variable('john'), Variable('mary')] + P(x) [] + \P.P(x) [] + aa(x,bb(y),cc(z),P(w),u) [] + bo(?det(?n),@x) [Variable('?n'), Variable('@x')] + + >>> for e in examples: + ... print('%-25s' % e, sorted(e.predicates())) + walk(john) [Variable('walk')] + walk(x) [Variable('walk')] + ?vp(?np) [Variable('?vp')] + see(john,mary) [Variable('see')] + exists x.walk(x) [Variable('walk')] + \x.see(john,x) [Variable('see')] + (\x.see(john,x))(mary) [Variable('see')] + P(x) [] + \P.P(x) [] + aa(x,bb(y),cc(z),P(w),u) [Variable('aa'), Variable('bb'), Variable('cc')] + bo(?det(?n),@x) [Variable('?det'), Variable('bo')] + + >>> for e in examples: + ... 
print('%-25s' % e, sorted(e.variables())) + walk(john) [] + walk(x) [Variable('x')] + ?vp(?np) [Variable('?np'), Variable('?vp')] + see(john,mary) [] + exists x.walk(x) [] + \x.see(john,x) [] + (\x.see(john,x))(mary) [] + P(x) [Variable('P'), Variable('x')] + \P.P(x) [Variable('x')] + aa(x,bb(y),cc(z),P(w),u) [Variable('P'), Variable('u'), Variable('w'), Variable('x'), Variable('y'), Variable('z')] + bo(?det(?n),@x) [Variable('?det'), Variable('?n'), Variable('@x')] + + + +`normalize` + >>> print(read_expr(r'\e083.(walk(e083, z472) & talk(e092, z938))').normalize()) + \e01.(walk(e01,z3) & talk(e02,z4)) + +Typed Logic ++++++++++++ + + >>> from nltk.sem.logic import LogicParser + >>> tlp = LogicParser(True) + >>> print(tlp.parse(r'man(x)').type) + ? + >>> print(tlp.parse(r'walk(angus)').type) + ? + >>> print(tlp.parse(r'-man(x)').type) + t + >>> print(tlp.parse(r'(man(x) <-> tall(x))').type) + t + >>> print(tlp.parse(r'exists x.(man(x) & tall(x))').type) + t + >>> print(tlp.parse(r'\x.man(x)').type) + + >>> print(tlp.parse(r'john').type) + e + >>> print(tlp.parse(r'\x y.sees(x,y)').type) + > + >>> print(tlp.parse(r'\x.man(x)(john)').type) + ? + >>> print(tlp.parse(r'\x.\y.sees(x,y)(john)').type) + + >>> print(tlp.parse(r'\x.\y.sees(x,y)(john)(mary)').type) + ? + >>> print(tlp.parse(r'\P.\Q.exists x.(P(x) & Q(x))').type) + <,<,t>> + >>> print(tlp.parse(r'\x.y').type) + + >>> print(tlp.parse(r'\P.P(x)').type) + <,?> + + >>> parsed = tlp.parse('see(john,mary)') + >>> print(parsed.type) + ? + >>> print(parsed.function) + see(john) + >>> print(parsed.function.type) + + >>> print(parsed.function.function) + see + >>> print(parsed.function.function.type) + > + + >>> parsed = tlp.parse('P(x,y)') + >>> print(parsed) + P(x,y) + >>> print(parsed.type) + ? + >>> print(parsed.function) + P(x) + >>> print(parsed.function.type) + + >>> print(parsed.function.function) + P + >>> print(parsed.function.function.type) + > + + >>> print(tlp.parse(r'P').type) + ? 
+ + >>> print(tlp.parse(r'P', {'P': 't'}).type) + t + + >>> a = tlp.parse(r'P(x)') + >>> print(a.type) + ? + >>> print(a.function.type) + + >>> print(a.argument.type) + e + + >>> a = tlp.parse(r'-P(x)') + >>> print(a.type) + t + >>> print(a.term.type) + t + >>> print(a.term.function.type) + + >>> print(a.term.argument.type) + e + + >>> a = tlp.parse(r'P & Q') + >>> print(a.type) + t + >>> print(a.first.type) + t + >>> print(a.second.type) + t + + >>> a = tlp.parse(r'(P(x) & Q(x))') + >>> print(a.type) + t + >>> print(a.first.type) + t + >>> print(a.first.function.type) + + >>> print(a.first.argument.type) + e + >>> print(a.second.type) + t + >>> print(a.second.function.type) + + >>> print(a.second.argument.type) + e + + >>> a = tlp.parse(r'\x.P(x)') + >>> print(a.type) + + >>> print(a.term.function.type) + + >>> print(a.term.argument.type) + e + + >>> a = tlp.parse(r'\P.P(x)') + >>> print(a.type) + <,?> + >>> print(a.term.function.type) + + >>> print(a.term.argument.type) + e + + >>> a = tlp.parse(r'(\x.P(x)(john)) & Q(x)') + >>> print(a.type) + t + >>> print(a.first.type) + t + >>> print(a.first.function.type) + + >>> print(a.first.function.term.function.type) + + >>> print(a.first.function.term.argument.type) + e + >>> print(a.first.argument.type) + e + + >>> a = tlp.parse(r'\x y.P(x,y)(john)(mary) & Q(x)') + >>> print(a.type) + t + >>> print(a.first.type) + t + >>> print(a.first.function.type) + + >>> print(a.first.function.function.type) + > + + >>> a = tlp.parse(r'--P') + >>> print(a.type) + t + >>> print(a.term.type) + t + >>> print(a.term.term.type) + t + + >>> tlp.parse(r'\x y.P(x,y)').type + > + >>> tlp.parse(r'\x y.P(x,y)', {'P': '>'}).type + > + + >>> a = tlp.parse(r'\P y.P(john,y)(\x y.see(x,y))') + >>> a.type + + >>> a.function.type + <>,> + >>> a.function.term.term.function.function.type + > + >>> a.argument.type + > + + >>> a = tlp.parse(r'exists c f.(father(c) = f)') + >>> a.type + t + >>> a.term.term.type + t + >>> a.term.term.first.type + e + >>> 
a.term.term.first.function.type + + >>> a.term.term.second.type + e + +typecheck() + + >>> a = tlp.parse('P(x)') + >>> b = tlp.parse('Q(x)') + >>> a.type + ? + >>> c = a & b + >>> c.first.type + ? + >>> c.typecheck() + {...} + >>> c.first.type + t + + >>> a = tlp.parse('P(x)') + >>> b = tlp.parse('P(x) & Q(x)') + >>> a.type + ? + >>> typecheck([a,b]) + {...} + >>> a.type + t + + >>> e = tlp.parse(r'man(x)') + >>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': ''}) + True + >>> sig = {'man': ''} + >>> e = tlp.parse(r'man(x)', sig) + >>> print(e.function.type) + + >>> print(dict((k,str(v)) for k,v in e.typecheck().items()) == {'x': 'e', 'man': ''}) + True + >>> print(e.function.type) + + >>> print(dict((k,str(v)) for k,v in e.typecheck(sig).items()) == {'x': 'e', 'man': ''}) + True + +findtype() + + >>> print(tlp.parse(r'man(x)').findtype(Variable('man'))) + + >>> print(tlp.parse(r'see(x,y)').findtype(Variable('see'))) + > + >>> print(tlp.parse(r'P(Q(R(x)))').findtype(Variable('Q'))) + ? 
+ +reading types from strings + + >>> Type.fromstring('e') + e + >>> Type.fromstring('') + + >>> Type.fromstring('<,>') + <,> + >>> Type.fromstring('<,?>') + <,?> + +alternative type format + + >>> Type.fromstring('e').str() + 'IND' + >>> Type.fromstring('').str() + '(IND -> ANY)' + >>> Type.fromstring('<,t>').str() + '((IND -> BOOL) -> BOOL)' + +Type.__eq__() + + >>> from nltk.sem.logic import * + + >>> e = ENTITY_TYPE + >>> t = TRUTH_TYPE + >>> a = ANY_TYPE + >>> et = ComplexType(e,t) + >>> eet = ComplexType(e,ComplexType(e,t)) + >>> at = ComplexType(a,t) + >>> ea = ComplexType(e,a) + >>> aa = ComplexType(a,a) + + >>> e == e + True + >>> t == t + True + >>> e == t + False + >>> a == t + False + >>> t == a + False + >>> a == a + True + >>> et == et + True + >>> a == et + False + >>> et == a + False + >>> a == ComplexType(a,aa) + True + >>> ComplexType(a,aa) == a + True + +matches() + + >>> e.matches(t) + False + >>> a.matches(t) + True + >>> t.matches(a) + True + >>> a.matches(et) + True + >>> et.matches(a) + True + >>> ea.matches(eet) + True + >>> eet.matches(ea) + True + >>> aa.matches(et) + True + >>> aa.matches(t) + True + +Type error during parsing +========================= + + >>> try: print(tlp.parse(r'exists x y.(P(x) & P(x,y))')) + ... except InconsistentTypeHierarchyException as e: print(e) + The variable 'P' was found in multiple places with different types. + >>> try: tlp.parse(r'\x y.see(x,y)(\x.man(x))') + ... except TypeException as e: print(e) + The function '\x y.see(x,y)' is of type '>' and cannot be applied to '\x.man(x)' of type ''. Its argument must match type 'e'. + >>> try: tlp.parse(r'\P x y.-P(x,y)(\x.-man(x))') + ... except TypeException as e: print(e) + The function '\P x y.-P(x,y)' is of type '<>,>>' and cannot be applied to '\x.-man(x)' of type ''. Its argument must match type '>'. + + >>> a = tlp.parse(r'-talk(x)') + >>> signature = a.typecheck() + >>> try: print(tlp.parse(r'-talk(x,y)', signature)) + ... 
except InconsistentTypeHierarchyException as e: print(e) + The variable 'talk' was found in multiple places with different types. + + >>> a = tlp.parse(r'-P(x)') + >>> b = tlp.parse(r'-P(x,y)') + >>> a.typecheck() + {...} + >>> b.typecheck() + {...} + >>> try: typecheck([a,b]) + ... except InconsistentTypeHierarchyException as e: print(e) + The variable 'P' was found in multiple places with different types. + + >>> a = tlp.parse(r'P(x)') + >>> b = tlp.parse(r'P(x,y)') + >>> signature = {'P': ''} + >>> a.typecheck(signature) + {...} + >>> try: typecheck([a,b], signature) + ... except InconsistentTypeHierarchyException as e: print(e) + The variable 'P' was found in multiple places with different types. + +Parse errors +============ + + >>> try: read_expr(r'') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + + ^ + >>> try: read_expr(r'(') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + ( + ^ + >>> try: read_expr(r')') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + ) + ^ + >>> try: read_expr(r'()') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + () + ^ + >>> try: read_expr(r'(P(x) & Q(x)') + ... except LogicalExpressionException as e: print(e) + End of input found. Expected token ')'. + (P(x) & Q(x) + ^ + >>> try: read_expr(r'(P(x) &') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + (P(x) & + ^ + >>> try: read_expr(r'(P(x) | )') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + (P(x) | ) + ^ + >>> try: read_expr(r'P(x) ->') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + P(x) -> + ^ + >>> try: read_expr(r'P(x') + ... except LogicalExpressionException as e: print(e) + End of input found. 
Expected token ')'. + P(x + ^ + >>> try: read_expr(r'P(x,') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + P(x, + ^ + >>> try: read_expr(r'P(x,)') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + P(x,) + ^ + >>> try: read_expr(r'exists') + ... except LogicalExpressionException as e: print(e) + End of input found. Variable and Expression expected following quantifier 'exists'. + exists + ^ + >>> try: read_expr(r'exists x') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + exists x + ^ + >>> try: read_expr(r'exists x.') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + exists x. + ^ + >>> try: read_expr(r'\ ') + ... except LogicalExpressionException as e: print(e) + End of input found. Variable and Expression expected following lambda operator. + \ + ^ + >>> try: read_expr(r'\ x') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + \ x + ^ + >>> try: read_expr(r'\ x y') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + \ x y + ^ + >>> try: read_expr(r'\ x.') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + \ x. + ^ + >>> try: read_expr(r'P(x)Q(x)') + ... except LogicalExpressionException as e: print(e) + Unexpected token: 'Q'. + P(x)Q(x) + ^ + >>> try: read_expr(r'(P(x)Q(x)') + ... except LogicalExpressionException as e: print(e) + Unexpected token: 'Q'. Expected token ')'. + (P(x)Q(x) + ^ + >>> try: read_expr(r'exists x y') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + exists x y + ^ + >>> try: read_expr(r'exists x y.') + ... except LogicalExpressionException as e: print(e) + End of input found. Expression expected. + exists x y. 
+ ^ + >>> try: read_expr(r'exists x -> y') + ... except LogicalExpressionException as e: print(e) + Unexpected token: '->'. Expression expected. + exists x -> y + ^ + + + >>> try: read_expr(r'A -> ((P(x) & Q(x)) -> Z') + ... except LogicalExpressionException as e: print(e) + End of input found. Expected token ')'. + A -> ((P(x) & Q(x)) -> Z + ^ + >>> try: read_expr(r'A -> ((P(x) &) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> ((P(x) &) -> Z + ^ + >>> try: read_expr(r'A -> ((P(x) | )) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> ((P(x) | )) -> Z + ^ + >>> try: read_expr(r'A -> (P(x) ->) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (P(x) ->) -> Z + ^ + >>> try: read_expr(r'A -> (P(x) -> Z') + ... except LogicalExpressionException as e: print(e) + End of input found. Expected token ')'. + A -> (P(x) -> Z + ^ + >>> try: read_expr(r'A -> (P(x,) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (P(x,) -> Z + ^ + >>> try: read_expr(r'A -> (P(x,)) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (P(x,)) -> Z + ^ + >>> try: read_expr(r'A -> (exists) -> Z') + ... except LogicalExpressionException as e: print(e) + ')' is an illegal variable name. Constants may not be quantified. + A -> (exists) -> Z + ^ + >>> try: read_expr(r'A -> (exists x) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (exists x) -> Z + ^ + >>> try: read_expr(r'A -> (exists x.) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (exists x.) -> Z + ^ + >>> try: read_expr(r'A -> (\ ) -> Z') + ... 
except LogicalExpressionException as e: print(e) + ')' is an illegal variable name. Constants may not be abstracted. + A -> (\ ) -> Z + ^ + >>> try: read_expr(r'A -> (\ x) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (\ x) -> Z + ^ + >>> try: read_expr(r'A -> (\ x y) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (\ x y) -> Z + ^ + >>> try: read_expr(r'A -> (\ x.) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (\ x.) -> Z + ^ + >>> try: read_expr(r'A -> (P(x)Q(x)) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: 'Q'. Expected token ')'. + A -> (P(x)Q(x)) -> Z + ^ + >>> try: read_expr(r'A -> ((P(x)Q(x)) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: 'Q'. Expected token ')'. + A -> ((P(x)Q(x)) -> Z + ^ + >>> try: read_expr(r'A -> (all x y) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (all x y) -> Z + ^ + >>> try: read_expr(r'A -> (exists x y.) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: ')'. Expression expected. + A -> (exists x y.) -> Z + ^ + >>> try: read_expr(r'A -> (exists x -> y) -> Z') + ... except LogicalExpressionException as e: print(e) + Unexpected token: '->'. Expression expected. + A -> (exists x -> y) -> Z + ^ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/meteor.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/meteor.doctest new file mode 100644 index 0000000000000000000000000000000000000000..40f8163b013cb3fda0dcb7c7ab8049ca8ad3c610 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/meteor.doctest @@ -0,0 +1,54 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +.. 
-*- coding: utf-8 -*- + +============= +METEOR tests +============= + +No Alignment test +------------------ + + >>> from nltk.translate import meteor + >>> from nltk import word_tokenize + +If the candidate has no alignment to any of the references, the METEOR score is 0. + + >>> round(meteor( + ... [word_tokenize('The candidate has no alignment to any of the references')], + ... word_tokenize('John loves Mary') + ... ), 4) + 0.0 + +Tests based on wikipedia examples +--------------------------------- + +Testing on `wikipedia examples `_ + + >>> same_res = round(meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('The cat sat on the mat') + ... ), 4) + >>> abs(same_res - 0.9977) < 1e-2 + True + + >>> meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('on the mat sat the cat') + ... ) + 0.5 + + >>> round(meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('The cat was sat on the mat') + ... ), 4) + 0.9654 + +Test corresponding to issue #2751, where METEOR score > 1 + + >>> round(meteor( + ... [word_tokenize('create or update a vm set')], + ... word_tokenize('creates or updates a virtual machine scale set') + ... ), 4) + 0.7806 diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/metrics.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/metrics.doctest new file mode 100644 index 0000000000000000000000000000000000000000..e38b574bfb1e5605ecfd257c554f228724b1b513 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/metrics.doctest @@ -0,0 +1,321 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +======= +Metrics +======= + +----- +Setup +----- + + >>> import pytest + >>> _ = pytest.importorskip("numpy") + + +The `nltk.metrics` package provides a variety of *evaluation measures* +which can be used for a wide variety of NLP tasks. 
+ + >>> from nltk.metrics import * + +------------------ +Standard IR Scores +------------------ + +We can use standard scores from information retrieval to test the +performance of taggers, chunkers, etc. + + >>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split() + >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split() + >>> print(accuracy(reference, test)) + 0.8 + + +The following measures apply to sets: + + >>> reference_set = set(reference) + >>> test_set = set(test) + >>> precision(reference_set, test_set) + 1.0 + >>> print(recall(reference_set, test_set)) + 0.8 + >>> print(f_measure(reference_set, test_set)) + 0.88888888888... + +Measuring the likelihood of the data, given probability distributions: + + >>> from nltk import FreqDist, MLEProbDist + >>> pdist1 = MLEProbDist(FreqDist("aldjfalskfjaldsf")) + >>> pdist2 = MLEProbDist(FreqDist("aldjfalssjjlldss")) + >>> print(log_likelihood(['a', 'd'], [pdist1, pdist2])) + -2.7075187496... + + +---------------- +Distance Metrics +---------------- + +String edit distance (Levenshtein): + + >>> edit_distance("rain", "shine") + 3 + >>> edit_distance_align("shine", "shine") + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + >>> edit_distance_align("rain", "brainy") + [(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (4, 6)] + >>> edit_distance_align("", "brainy") + [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)] + >>> edit_distance_align("", "") + [(0, 0)] + +Other distance measures: + + >>> s1 = set([1,2,3,4]) + >>> s2 = set([3,4,5]) + >>> binary_distance(s1, s2) + 1.0 + >>> print(jaccard_distance(s1, s2)) + 0.6 + >>> print(masi_distance(s1, s2)) + 0.868 + +---------------------- +Miscellaneous Measures +---------------------- + +Rank Correlation works with two dictionaries mapping keys to ranks. +The dictionaries should have the same set of keys. 
+ + >>> spearman_correlation({'e':1, 't':2, 'a':3}, {'e':1, 'a':2, 't':3}) + 0.5 + +Windowdiff uses a sliding window in comparing two segmentations of the same input (e.g. tokenizations, chunkings). +Segmentations are represented using strings of zeros and ones. + + >>> s1 = "000100000010" + >>> s2 = "000010000100" + >>> s3 = "100000010000" + >>> s4 = "000000000000" + >>> s5 = "111111111111" + >>> windowdiff(s1, s1, 3) + 0.0 + >>> abs(windowdiff(s1, s2, 3) - 0.3) < 1e-6 # windowdiff(s1, s2, 3) == 0.3 + True + >>> abs(windowdiff(s2, s3, 3) - 0.8) < 1e-6 # windowdiff(s2, s3, 3) == 0.8 + True + >>> windowdiff(s1, s4, 3) + 0.5 + >>> windowdiff(s1, s5, 3) + 1.0 + +---------------- +Confusion Matrix +---------------- + + >>> reference = 'This is the reference data. Testing 123. aoaeoeoe' + >>> test = 'Thos iz_the rifirenci data. Testeng 123. aoaeoeoe' + >>> print(ConfusionMatrix(reference, test)) + | . 1 2 3 T _ a c d e f g h i n o r s t z | + --+-------------------------------------------+ + |<8>. . . . . 1 . . . . . . . . . . . . . . | + . | .<2>. . . . . . . . . . . . . . . . . . . | + 1 | . .<1>. . . . . . . . . . . . . . . . . . | + 2 | . . .<1>. . . . . . . . . . . . . . . . . | + 3 | . . . .<1>. . . . . . . . . . . . . . . . | + T | . . . . .<2>. . . . . . . . . . . . . . . | + _ | . . . . . .<.>. . . . . . . . . . . . . . | + a | . . . . . . .<4>. . . . . . . . . . . . . | + c | . . . . . . . .<1>. . . . . . . . . . . . | + d | . . . . . . . . .<1>. . . . . . . . . . . | + e | . . . . . . . . . .<6>. . . 3 . . . . . . | + f | . . . . . . . . . . .<1>. . . . . . . . . | + g | . . . . . . . . . . . .<1>. . . . . . . . | + h | . . . . . . . . . . . . .<2>. . . . . . . | + i | . . . . . . . . . . 1 . . .<1>. 1 . . . . | + n | . . . . . . . . . . . . . . .<2>. . . . . | + o | . . . . . . . . . . . . . . . .<3>. . . . | + r | . . . . . . . . . . . . . . . . .<2>. . . | + s | . . . . . . . . . . . . . . . . . .<2>. 1 | + t | . . . . . . . . . . . . . . . . . . .<3>. 
| + z | . . . . . . . . . . . . . . . . . . . .<.>| + --+-------------------------------------------+ + (row = reference; col = test) + + + >>> cm = ConfusionMatrix(reference, test) + >>> print(cm.pretty_format(sort_by_count=True)) + | e a i o s t . T h n r 1 2 3 c d f g _ z | + --+-------------------------------------------+ + |<8>. . . . . . . . . . . . . . . . . . 1 . | + e | .<6>. 3 . . . . . . . . . . . . . . . . . | + a | . .<4>. . . . . . . . . . . . . . . . . . | + i | . 1 .<1>1 . . . . . . . . . . . . . . . . | + o | . . . .<3>. . . . . . . . . . . . . . . . | + s | . . . . .<2>. . . . . . . . . . . . . . 1 | + t | . . . . . .<3>. . . . . . . . . . . . . . | + . | . . . . . . .<2>. . . . . . . . . . . . . | + T | . . . . . . . .<2>. . . . . . . . . . . . | + h | . . . . . . . . .<2>. . . . . . . . . . . | + n | . . . . . . . . . .<2>. . . . . . . . . . | + r | . . . . . . . . . . .<2>. . . . . . . . . | + 1 | . . . . . . . . . . . .<1>. . . . . . . . | + 2 | . . . . . . . . . . . . .<1>. . . . . . . | + 3 | . . . . . . . . . . . . . .<1>. . . . . . | + c | . . . . . . . . . . . . . . .<1>. . . . . | + d | . . . . . . . . . . . . . . . .<1>. . . . | + f | . . . . . . . . . . . . . . . . .<1>. . . | + g | . . . . . . . . . . . . . . . . . .<1>. . | + _ | . . . . . . . . . . . . . . . . . . .<.>. | + z | . . . . . . . . . . . . . . . . . . . .<.>| + --+-------------------------------------------+ + (row = reference; col = test) + + + >>> print(cm.pretty_format(sort_by_count=True, truncate=10)) + | e a i o s t . T h | + --+---------------------+ + |<8>. . . . . . . . . | + e | .<6>. 3 . . . . . . | + a | . .<4>. . . . . . . | + i | . 1 .<1>1 . . . . . | + o | . . . .<3>. . . . . | + s | . . . . .<2>. . . . | + t | . . . . . .<3>. . . | + . | . . . . . . .<2>. . | + T | . . . . . . . .<2>. | + h | . . . . . . . . 
.<2>| + --+---------------------+ + (row = reference; col = test) + + + >>> print(cm.pretty_format(sort_by_count=True, truncate=10, values_in_chart=False)) + | 1 | + | 1 2 3 4 5 6 7 8 9 0 | + ---+---------------------+ + 1 |<8>. . . . . . . . . | + 2 | .<6>. 3 . . . . . . | + 3 | . .<4>. . . . . . . | + 4 | . 1 .<1>1 . . . . . | + 5 | . . . .<3>. . . . . | + 6 | . . . . .<2>. . . . | + 7 | . . . . . .<3>. . . | + 8 | . . . . . . .<2>. . | + 9 | . . . . . . . .<2>. | + 10 | . . . . . . . . .<2>| + ---+---------------------+ + (row = reference; col = test) + Value key: + 1: + 2: e + 3: a + 4: i + 5: o + 6: s + 7: t + 8: . + 9: T + 10: h + + +For "e", the number of true positives should be 6, while the number of false negatives is 3. +So, the recall ought to be 6 / (6 + 3): + + >>> cm.recall("e") # doctest: +ELLIPSIS + 0.666666... + +For "e", the false positive is just 1, so the precision should be 6 / (6 + 1): + + >>> cm.precision("e") # doctest: +ELLIPSIS + 0.857142... + +The f-measure with default value of ``alpha = 0.5`` should then be: + +* *1/(alpha/p + (1-alpha)/r) =* +* *1/(0.5/p + 0.5/r) =* +* *2pr / (p + r) =* +* *2 * 0.857142... * 0.666666... / (0.857142... + 0.666666...) =* +* *0.749999...* + + >>> cm.f_measure("e") # doctest: +ELLIPSIS + 0.749999... + +-------------------- +Association measures +-------------------- + +These measures are useful to determine whether the coocurrence of two random +events is meaningful. They are used, for instance, to distinguish collocations +from other pairs of adjacent words. + +We bring some examples of bigram association calculations from Manning and +Schutze's SNLP, 2nd Ed. chapter 5. + + >>> n_new_companies, n_new, n_companies, N = 8, 15828, 4675, 14307668 + >>> bam = BigramAssocMeasures + >>> bam.raw_freq(20, (42, 20), N) == 20. / N + True + >>> bam.student_t(n_new_companies, (n_new, n_companies), N) + 0.999... + >>> bam.chi_sq(n_new_companies, (n_new, n_companies), N) + 1.54... 
+ >>> bam.likelihood_ratio(150, (12593, 932), N) + 1291... + +For other associations, we ensure the ordering of the measures: + + >>> bam.mi_like(20, (42, 20), N) > bam.mi_like(20, (41, 27), N) + True + >>> bam.pmi(20, (42, 20), N) > bam.pmi(20, (41, 27), N) + True + >>> bam.phi_sq(20, (42, 20), N) > bam.phi_sq(20, (41, 27), N) + True + >>> bam.poisson_stirling(20, (42, 20), N) > bam.poisson_stirling(20, (41, 27), N) + True + >>> bam.jaccard(20, (42, 20), N) > bam.jaccard(20, (41, 27), N) + True + >>> bam.dice(20, (42, 20), N) > bam.dice(20, (41, 27), N) + True + >>> bam.fisher(20, (42, 20), N) > bam.fisher(20, (41, 27), N) # doctest: +SKIP + False + +For trigrams, we have to provide more count information: + + >>> n_w1_w2_w3 = 20 + >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40 + >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3) + >>> n_w1, n_w2, n_w3 = 100, 200, 300 + >>> uni_counts = (n_w1, n_w2, n_w3) + >>> N = 14307668 + >>> tam = TrigramAssocMeasures + >>> tam.raw_freq(n_w1_w2_w3, pair_counts, uni_counts, N) == 1. 
* n_w1_w2_w3 / N + True + >>> uni_counts2 = (n_w1, n_w2, 100) + >>> tam.student_t(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.student_t(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.chi_sq(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.mi_like(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.pmi(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.pmi(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.likelihood_ratio(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.poisson_stirling(n_w1_w2_w3, pair_counts, uni_counts, N) + True + >>> tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts2, N) > tam.jaccard(n_w1_w2_w3, pair_counts, uni_counts, N) + True + + +For fourgrams, we have to provide more count information: + + >>> n_w1_w2_w3_w4 = 5 + >>> n_w1_w2, n_w1_w3, n_w2_w3 = 35, 60, 40 + >>> n_w1_w2_w3, n_w2_w3_w4 = 20, 10 + >>> pair_counts = (n_w1_w2, n_w1_w3, n_w2_w3) + >>> triplet_counts = (n_w1_w2_w3, n_w2_w3_w4) + >>> n_w1, n_w2, n_w3, n_w4 = 100, 200, 300, 400 + >>> uni_counts = (n_w1, n_w2, n_w3, n_w4) + >>> N = 14307668 + >>> qam = QuadgramAssocMeasures + >>> qam.raw_freq(n_w1_w2_w3_w4, pair_counts, triplet_counts, uni_counts, N) == 1. * n_w1_w2_w3_w4 / N + True diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest new file mode 100644 index 0000000000000000000000000000000000000000..1abea480c4901b705479117d1894a2a08669c491 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest @@ -0,0 +1,293 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. 
For license information, see LICENSE.TXT + +====================== +Nonmonotonic Reasoning +====================== + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('mace4') + + >>> from nltk import * + >>> from nltk.inference.nonmonotonic import * + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + >>> read_expr = logic.Expression.fromstring + +------------------------ +Closed Domain Assumption +------------------------ + +The only entities in the domain are those found in the assumptions or goal. +If the domain only contains "A" and "B", then the expression "exists x.P(x)" can +be replaced with "P(A) | P(B)" and an expression "all x.P(x)" can be replaced +with "P(A) & P(B)". + + >>> p1 = read_expr(r'all x.(man(x) -> mortal(x))') + >>> p2 = read_expr(r'man(Socrates)') + >>> c = read_expr(r'mortal(Socrates)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + True + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + (man(Socrates) -> mortal(Socrates)) + man(Socrates) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'exists x.walk(x)') + >>> p2 = read_expr(r'man(Socrates)') + >>> c = read_expr(r'walk(Socrates)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + man(Socrates) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'exists x.walk(x)') + >>> p2 = read_expr(r'man(Socrates)') + >>> p3 = read_expr(r'-walk(Bill)') + >>> c = read_expr(r'walk(Socrates)') + >>> prover = Prover9Command(c, [p1,p2,p3]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + (walk(Socrates) | walk(Bill)) + man(Socrates) + -walk(Bill) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'walk(Socrates)') + >>> p2 = read_expr(r'walk(Bill)') + >>> c = read_expr(r'all x.walk(x)') + >>> 
prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + walk(Bill) + >>> print(cdp.goal()) # doctest: +SKIP + (walk(Socrates) & walk(Bill)) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'girl(mary)') + >>> p2 = read_expr(r'dog(rover)') + >>> p3 = read_expr(r'all x.(girl(x) -> -dog(x))') + >>> p4 = read_expr(r'all x.(dog(x) -> -girl(x))') + >>> p5 = read_expr(r'chase(mary, rover)') + >>> c = read_expr(r'exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))') + >>> prover = Prover9Command(c, [p1,p2,p3,p4,p5]) + >>> print(prover.prove()) + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + girl(mary) + dog(rover) + ((girl(rover) -> -dog(rover)) & (girl(mary) -> -dog(mary))) + ((dog(rover) -> -girl(rover)) & (dog(mary) -> -girl(mary))) + chase(mary,rover) + >>> print(cdp.goal()) # doctest: +SKIP + ((dog(rover) & (girl(rover) -> chase(rover,rover)) & (girl(mary) -> chase(mary,rover))) | (dog(mary) & (girl(rover) -> chase(rover,mary)) & (girl(mary) -> chase(mary,mary)))) + >>> print(cdp.prove()) + True + +----------------------- +Unique Names Assumption +----------------------- + +No two entities in the domain represent the same entity unless it can be +explicitly proven that they do. Therefore, if the domain contains "A" and "B", +then add the assumption "-(A = B)" if it is not the case that +" \|- (A = B)". 
+ + >>> p1 = read_expr(r'man(Socrates)') + >>> p2 = read_expr(r'man(Bill)') + >>> c = read_expr(r'exists x.exists y.-(x = y)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> unp = UniqueNamesProver(prover) + >>> for a in unp.assumptions(): print(a) # doctest: +SKIP + man(Socrates) + man(Bill) + -(Socrates = Bill) + >>> unp.prove() + True + + >>> p1 = read_expr(r'all x.(walk(x) -> (x = Socrates))') + >>> p2 = read_expr(r'Bill = William') + >>> p3 = read_expr(r'Bill = Billy') + >>> c = read_expr(r'-walk(William)') + >>> prover = Prover9Command(c, [p1,p2,p3]) + >>> prover.prove() + False + >>> unp = UniqueNamesProver(prover) + >>> for a in unp.assumptions(): print(a) # doctest: +SKIP + all x.(walk(x) -> (x = Socrates)) + (Bill = William) + (Bill = Billy) + -(William = Socrates) + -(Billy = Socrates) + -(Socrates = Bill) + >>> unp.prove() + True + +----------------------- +Closed World Assumption +----------------------- + +The only entities that have certain properties are those that is it stated +have the properties. We accomplish this assumption by "completing" predicates. + +If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion +of "P". If the assumptions contain "all x.(ostrich(x) -> bird(x))", then +"all x.(bird(x) -> ostrich(x))" is the completion of "bird". If the +assumptions don't contain anything that are "P", then "all x.-P(x)" is the +completion of "P". 
+ + >>> p1 = read_expr(r'walk(Socrates)') + >>> p2 = read_expr(r'-(Socrates = Bill)') + >>> c = read_expr(r'-walk(Bill)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + -(Socrates = Bill) + all z1.(walk(z1) -> (z1 = Socrates)) + >>> cwp.prove() + True + + >>> p1 = read_expr(r'see(Socrates, John)') + >>> p2 = read_expr(r'see(John, Mary)') + >>> p3 = read_expr(r'-(Socrates = John)') + >>> p4 = read_expr(r'-(John = Mary)') + >>> c = read_expr(r'-see(Socrates, Mary)') + >>> prover = Prover9Command(c, [p1,p2,p3,p4]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + see(Socrates,John) + see(John,Mary) + -(Socrates = John) + -(John = Mary) + all z3 z4.(see(z3,z4) -> (((z3 = Socrates) & (z4 = John)) | ((z3 = John) & (z4 = Mary)))) + >>> cwp.prove() + True + + >>> p1 = read_expr(r'all x.(ostrich(x) -> bird(x))') + >>> p2 = read_expr(r'bird(Tweety)') + >>> p3 = read_expr(r'-ostrich(Sam)') + >>> p4 = read_expr(r'Sam != Tweety') + >>> c = read_expr(r'-bird(Sam)') + >>> prover = Prover9Command(c, [p1,p2,p3,p4]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + -(Sam = Tweety) + all z7.-ostrich(z7) + all z8.(bird(z8) -> ((z8 = Tweety) | ostrich(z8))) + >>> print(cwp.prove()) + True + +----------------------- +Multi-Decorator Example +----------------------- + +Decorators can be nested to utilize multiple assumptions. 
+ + >>> p1 = read_expr(r'see(Socrates, John)') + >>> p2 = read_expr(r'see(John, Mary)') + >>> c = read_expr(r'-see(Socrates, Mary)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> print(prover.prove()) + False + >>> cmd = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + >>> print(cmd.prove()) + True + +----------------- +Default Reasoning +----------------- + >>> logic._counter._value = 0 + >>> premises = [] + +define the taxonomy + + >>> premises.append(read_expr(r'all x.(elephant(x) -> animal(x))')) + >>> premises.append(read_expr(r'all x.(bird(x) -> animal(x))')) + >>> premises.append(read_expr(r'all x.(dove(x) -> bird(x))')) + >>> premises.append(read_expr(r'all x.(ostrich(x) -> bird(x))')) + >>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> ostrich(x))')) + +default the properties using abnormalities + + >>> premises.append(read_expr(r'all x.((animal(x) & -Ab1(x)) -> -fly(x))')) #normal animals don't fly + >>> premises.append(read_expr(r'all x.((bird(x) & -Ab2(x)) -> fly(x))')) #normal birds fly + >>> premises.append(read_expr(r'all x.((ostrich(x) & -Ab3(x)) -> -fly(x))')) #normal ostriches don't fly + +specify abnormal entities + + >>> premises.append(read_expr(r'all x.(bird(x) -> Ab1(x))')) #flight + >>> premises.append(read_expr(r'all x.(ostrich(x) -> Ab2(x))')) #non-flying bird + >>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> Ab3(x))')) #flying ostrich + +define entities + + >>> premises.append(read_expr(r'elephant(el)')) + >>> premises.append(read_expr(r'dove(do)')) + >>> premises.append(read_expr(r'ostrich(os)')) + +print the augmented assumptions list + + >>> prover = Prover9Command(None, premises) + >>> command = UniqueNamesProver(ClosedWorldProver(prover)) + >>> for a in command.assumptions(): print(a) # doctest: +SKIP + all x.(elephant(x) -> animal(x)) + all x.(bird(x) -> animal(x)) + all x.(dove(x) -> bird(x)) + all x.(ostrich(x) -> bird(x)) + all x.(flying_ostrich(x) -> ostrich(x)) + all x.((animal(x) & 
-Ab1(x)) -> -fly(x)) + all x.((bird(x) & -Ab2(x)) -> fly(x)) + all x.((ostrich(x) & -Ab3(x)) -> -fly(x)) + all x.(bird(x) -> Ab1(x)) + all x.(ostrich(x) -> Ab2(x)) + all x.(flying_ostrich(x) -> Ab3(x)) + elephant(el) + dove(do) + ostrich(os) + all z1.(animal(z1) -> (elephant(z1) | bird(z1))) + all z2.(Ab1(z2) -> bird(z2)) + all z3.(bird(z3) -> (dove(z3) | ostrich(z3))) + all z4.(dove(z4) -> (z4 = do)) + all z5.(Ab2(z5) -> ostrich(z5)) + all z6.(Ab3(z6) -> flying_ostrich(z6)) + all z7.(ostrich(z7) -> ((z7 = os) | flying_ostrich(z7))) + all z8.-flying_ostrich(z8) + all z9.(elephant(z9) -> (z9 = el)) + -(el = os) + -(el = do) + -(os = do) + + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(el)'), premises))).prove() + True + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('fly(do)'), premises))).prove() + True + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(os)'), premises))).prove() + True diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/relextract.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/relextract.doctest new file mode 100644 index 0000000000000000000000000000000000000000..c7c42a699a50a36e86cbddb6cf7d0ebcddbb0d83 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/relextract.doctest @@ -0,0 +1,263 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +====================== +Information Extraction +====================== + +Information Extraction standardly consists of three subtasks: + +#. Named Entity Recognition + +#. Relation Extraction + +#. Template Filling + +Named Entities +~~~~~~~~~~~~~~ + +The IEER corpus is marked up for a variety of Named Entities. A Named +Entity (more strictly, a Named Entity mention) is a name of an +entity belonging to a specified class. For example, the Named Entity +classes in IEER include PERSON, LOCATION, ORGANIZATION, DATE and so +on. 
Within NLTK, Named Entities are represented as subtrees within a +chunk structure: the class name is treated as node label, while the +entity mention itself appears as the leaves of the subtree. This is +illustrated below, where we have show an extract of the chunk +representation of document NYT_19980315.064: + + >>> from nltk.corpus import ieer + >>> docs = ieer.parsed_docs('NYT_19980315') + >>> tree = docs[1].text + >>> print(tree) + (DOCUMENT + ... + ``It's + a + chance + to + think + about + first-level + questions,'' + said + Ms. + (PERSON Cohn) + , + a + partner + in + the + (ORGANIZATION McGlashan & Sarrail) + firm + in + (LOCATION San Mateo) + , + (LOCATION Calif.) + ...) + +Thus, the Named Entity mentions in this example are *Cohn*, *McGlashan & +Sarrail*, *San Mateo* and *Calif.*. + +The CoNLL2002 Dutch and Spanish data is treated similarly, although in +this case, the strings are also POS tagged. + + >>> from nltk.corpus import conll2002 + >>> for doc in conll2002.chunked_sents('ned.train')[27]: + ... print(doc) + ('Het', 'Art') + (ORG Hof/N van/Prep Cassatie/N) + ('verbrak', 'V') + ('het', 'Art') + ('arrest', 'N') + ('zodat', 'Conj') + ('het', 'Pron') + ('moest', 'V') + ('worden', 'V') + ('overgedaan', 'V') + ('door', 'Prep') + ('het', 'Art') + ('hof', 'N') + ('van', 'Prep') + ('beroep', 'N') + ('van', 'Prep') + (LOC Antwerpen/N) + ('.', 'Punc') + +Relation Extraction +~~~~~~~~~~~~~~~~~~~ + +Relation Extraction standardly consists of identifying specified +relations between Named Entities. For example, assuming that we can +recognize ORGANIZATIONs and LOCATIONs in text, we might want to also +recognize pairs *(o, l)* of these kinds of entities such that *o* is +located in *l*. + +The `sem.relextract` module provides some tools to help carry out a +simple version of this task. 
The `tree2semi_rel()` function splits a chunk +document into a list of two-member lists, each of which consists of a +(possibly empty) string followed by a `Tree` (i.e., a Named Entity): + + >>> from nltk.sem import relextract + >>> pairs = relextract.tree2semi_rel(tree) + >>> for s, tree in pairs[18:22]: + ... print('("...%s", %s)' % (" ".join(s[-5:]),tree)) + ("...about first-level questions,'' said Ms.", (PERSON Cohn)) + ("..., a partner in the", (ORGANIZATION McGlashan & Sarrail)) + ("...firm in", (LOCATION San Mateo)) + ("...,", (LOCATION Calif.)) + +The function `semi_rel2reldict()` processes triples of these pairs, i.e., +pairs of the form ``((string1, Tree1), (string2, Tree2), (string3, +Tree3))`` and outputs a dictionary (a `reldict`) in which ``Tree1`` is +the subject of the relation, ``string2`` is the filler +and ``Tree3`` is the object of the relation. ``string1`` and ``string3`` are +stored as left and right context respectively. + + >>> reldicts = relextract.semi_rel2reldict(pairs) + >>> for k, v in sorted(reldicts[0].items()): + ... print(k, '=>', v) + filler => of messages to their own ``Cyberia'' ... + lcon => transactions.'' Each week, they post + objclass => ORGANIZATION + objsym => white_house + objtext => White House + rcon => for access to its planned + subjclass => CARDINAL + subjsym => hundreds + subjtext => hundreds + untagged_filler => of messages to their own ``Cyberia'' ... + +The next example shows some of the values for two `reldict`\ s +corresponding to the ``'NYT_19980315'`` text extract shown earlier. + + >>> for r in reldicts[18:20]: + ... print('=' * 20) + ... print(r['subjtext']) + ... print(r['filler']) + ... print(r['objtext']) + ==================== + Cohn + , a partner in the + McGlashan & Sarrail + ==================== + McGlashan & Sarrail + firm in + San Mateo + +The function `relextract()` allows us to filter the `reldict`\ s +according to the classes of the subject and object named entities. 
In +addition, we can specify that the filler text has to match a given +regular expression, as illustrated in the next example. Here, we are +looking for pairs of entities in the IN relation, where IN has +signature . + + >>> import re + >>> IN = re.compile(r'.*\bin\b(?!\b.+ing\b)') + >>> for fileid in ieer.fileids(): + ... for doc in ieer.parsed_docs(fileid): + ... for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern = IN): + ... print(relextract.rtuple(rel)) + [ORG: 'Christian Democrats'] ', the leading political forces in' [LOC: 'Italy'] + [ORG: 'AP'] ') _ Lebanese guerrillas attacked Israeli forces in southern' [LOC: 'Lebanon'] + [ORG: 'Security Council'] 'adopted Resolution 425. Huge yellow banners hung across intersections in' [LOC: 'Beirut'] + [ORG: 'U.N.'] 'failures in' [LOC: 'Africa'] + [ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia'] + [ORG: 'U.N.'] 'partners on a more effective role in' [LOC: 'Africa'] + [ORG: 'AP'] ') _ A bomb exploded in a mosque in central' [LOC: 'San`a'] + [ORG: 'Krasnoye Sormovo'] 'shipyard in the Soviet city of' [LOC: 'Gorky'] + [ORG: 'Kelab Golf Darul Ridzuan'] 'in' [LOC: 'Perak'] + [ORG: 'U.N.'] 'peacekeeping operation in' [LOC: 'Somalia'] + [ORG: 'WHYY'] 'in' [LOC: 'Philadelphia'] + [ORG: 'McGlashan & Sarrail'] 'firm in' [LOC: 'San Mateo'] + [ORG: 'Freedom Forum'] 'in' [LOC: 'Arlington'] + [ORG: 'Brookings Institution'] ', the research group in' [LOC: 'Washington'] + [ORG: 'Idealab'] ', a self-described business incubator based in' [LOC: 'Los Angeles'] + [ORG: 'Open Text'] ', based in' [LOC: 'Waterloo'] + ... + +The next example illustrates a case where the pattern is a disjunction +of roles that a PERSON can occupy in an ORGANIZATION. + + >>> roles = r""" + ... (.*( + ... analyst| + ... chair(wo)?man| + ... commissioner| + ... counsel| + ... director| + ... economist| + ... editor| + ... executive| + ... foreman| + ... governor| + ... head| + ... lawyer| + ... leader| + ... librarian).*)| + ... 
manager| + ... partner| + ... president| + ... producer| + ... professor| + ... researcher| + ... spokes(wo)?man| + ... writer| + ... ,\sof\sthe?\s* # "X, of (the) Y" + ... """ + >>> ROLES = re.compile(roles, re.VERBOSE) + >>> for fileid in ieer.fileids(): + ... for doc in ieer.parsed_docs(fileid): + ... for rel in relextract.extract_rels('PER', 'ORG', doc, corpus='ieer', pattern=ROLES): + ... print(relextract.rtuple(rel)) + [PER: 'Kivutha Kibwana'] ', of the' [ORG: 'National Convention Assembly'] + [PER: 'Boban Boskovic'] ', chief executive of the' [ORG: 'Plastika'] + [PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations'] + [PER: 'Kiriyenko'] 'became a foreman at the' [ORG: 'Krasnoye Sormovo'] + [PER: 'Annan'] ', the first sub-Saharan African to head the' [ORG: 'United Nations'] + [PER: 'Mike Godwin'] ', chief counsel for the' [ORG: 'Electronic Frontier Foundation'] + ... + +In the case of the CoNLL2002 data, we can include POS tags in the +query pattern. This example also illustrates how the output can be +presented as something that looks more like a clause in a logical language. + + >>> de = """ + ... .* + ... ( + ... de/SP| + ... del/SP + ... ) + ... """ + >>> DE = re.compile(de, re.VERBOSE) + >>> rels = [rel for doc in conll2002.chunked_sents('esp.train') + ... for rel in relextract.extract_rels('ORG', 'LOC', doc, corpus='conll2002', pattern = DE)] + >>> for r in rels[:10]: + ... print(relextract.clause(r, relsym='DE')) + DE('tribunal_supremo', 'victoria') + DE('museo_de_arte', 'alcorc\xf3n') + DE('museo_de_bellas_artes', 'a_coru\xf1a') + DE('siria', 'l\xedbano') + DE('uni\xf3n_europea', 'pek\xedn') + DE('ej\xe9rcito', 'rogberi') + DE('juzgado_de_instrucci\xf3n_n\xfamero_1', 'san_sebasti\xe1n') + DE('psoe', 'villanueva_de_la_serena') + DE('ej\xe9rcito', 'l\xedbano') + DE('juzgado_de_lo_penal_n\xfamero_2', 'ceuta') + >>> vnv = """ + ... ( + ... is/V| + ... was/V| + ... werd/V| + ... wordt/V + ... ) + ... .* + ... van/Prep + ... 
""" + >>> VAN = re.compile(vnv, re.VERBOSE) + >>> for doc in conll2002.chunked_sents('ned.train'): + ... for r in relextract.extract_rels('PER', 'ORG', doc, corpus='conll2002', pattern=VAN): + ... print(relextract.clause(r, relsym="VAN")) + VAN("cornet_d'elzius", 'buitenlandse_handel') + VAN('johan_rottiers', 'kardinaal_van_roey_instituut') + VAN('annie_lennox', 'eurythmics') diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest new file mode 100644 index 0000000000000000000000000000000000000000..c8c587d11f0cc0117d4063949740a857d76ccc6f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest @@ -0,0 +1,41 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +====================== +SentiWordNet Interface +====================== + +SentiWordNet can be imported like this: + + >>> from nltk.corpus import sentiwordnet as swn + +------------ +SentiSynsets +------------ + + >>> breakdown = swn.senti_synset('breakdown.n.03') + >>> print(breakdown) + + >>> breakdown.pos_score() + 0.0 + >>> breakdown.neg_score() + 0.25 + >>> breakdown.obj_score() + 0.75 + + +------ +Lookup +------ + + >>> list(swn.senti_synsets('slow')) + [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'), + SentiSynset('slow.v.03'), SentiSynset('slow.a.01'), + SentiSynset('slow.a.02'), SentiSynset('dense.s.04'), + SentiSynset('slow.a.04'), SentiSynset('boring.s.01'), + SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'), + SentiSynset('behind.r.03')] + + >>> happy = swn.senti_synsets('happy', 'a') + + >>> all = swn.all_senti_synsets() diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/toolbox.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/toolbox.doctest new file mode 100644 index 0000000000000000000000000000000000000000..cab0d7416de23b66f9dad230ae7699f2025d72d5 --- /dev/null +++ 
b/openflamingo/lib/python3.10/site-packages/nltk/test/toolbox.doctest @@ -0,0 +1,306 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +=============================== +Unit test cases for ``toolbox`` +=============================== + + >>> from nltk import toolbox + +-------------------------- +``toolbox.StandardFormat`` +-------------------------- + + >>> f = toolbox.StandardFormat() + +``toolbox.StandardFormat.open()`` +--------------------------------- + >>> import os, tempfile + >>> (fd, fname) = tempfile.mkstemp() + >>> tf = os.fdopen(fd, "w") + >>> _ = tf.write('\\lx a value\n\\lx another value\n') + >>> tf.close() + >>> f = toolbox.StandardFormat() + >>> f.open(fname) + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + >>> os.unlink(fname) + +``toolbox.StandardFormat.open_string()`` +---------------------------------------- + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + +``toolbox.StandardFormat.close()`` +---------------------------------- + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + +``toolbox.StandardFormat.line_num`` +--------------------------------------- + +``StandardFormat.line_num`` contains the line number of the last line returned: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n\\lx a third value\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... 
line_nums.append(f.line_num) + >>> line_nums + [1, 2, 3] + +``StandardFormat.line_num`` contains the line number of the last line returned: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... line_nums.append(f.line_num) + >>> line_nums + [2, 5, 7] + +``StandardFormat.line_num`` doesn't exist before opening or after closing +a file or string: + + >>> f = toolbox.StandardFormat() + >>> f.line_num + Traceback (most recent call last): + ... + AttributeError: 'StandardFormat' object has no attribute 'line_num' + >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... line_nums.append(f.line_num) + >>> line_nums + [2, 5, 7] + >>> f.close() + >>> f.line_num + Traceback (most recent call last): + ... + AttributeError: 'StandardFormat' object has no attribute 'line_num' + +``toolbox.StandardFormat.raw_fields()`` +--------------------------------------- +``raw_fields()`` returns an iterator over tuples of two strings representing the +marker and its value. 
The marker is given without the backslash and the value +without its trailing newline: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +an empty file returns nothing: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('') + >>> list(f.raw_fields()) + [] + +file with only a newline returns WHAT SHOULD IT RETURN???: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\n') + >>> list(f.raw_fields()) + [(None, '')] + +file with only one field should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx one value\n') + >>> list(f.raw_fields()) + [('lx', 'one value')] + +file without a trailing newline should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +trailing white space is preserved except for the final newline: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n') + >>> list(f.raw_fields()) + [('lx', 'trailing space '), ('lx', 'trailing tab\t'), ('lx', 'extra newline\n')] + +line wrapping is preserved: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.raw_fields()) + [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')] + +file beginning with a multiline record should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.raw_fields()) + [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')] + +file ending with a multiline record should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lc a value\n\\lx another value\nmore of the value\nand still 
more\n') + >>> list(f.raw_fields()) + [('lc', 'a value'), ('lx', 'another value\nmore of the value\nand still more')] + +file beginning with a BOM should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\xef\xbb\xbf\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +file beginning with two BOMs should ignore only the first one: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\xef\xbb\xbf\xef\xbb\xbf\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [(None, '\xef\xbb\xbf\\lx a value'), ('lx', 'another value')] + +should not ignore a BOM not at the beginning of the file: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\xef\xbb\xbf\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value\n\xef\xbb\xbf\\lx another value')] + +``toolbox.StandardFormat.fields()`` +----------------------------------- +trailing white space is not preserved: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n') + >>> list(f.fields()) + [('lx', 'trailing space'), ('lx', 'trailing tab'), ('lx', 'extra newline')] + +multiline fields are unwrapped: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.fields()) + [('lx', 'a value more of the value and still more'), ('lc', 'another val')] + +markers +------- +A backslash in the first position on a new line indicates the start of a +marker. 
The backslash is not part of the marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n') + >>> list(f.fields()) + [('mk', 'a value')] + +If the backslash occurs later in the line it does not indicate the start +of a marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n \\mk another one\n') + >>> list(f.raw_fields()) + [('mk', 'a value\n \\mk another one')] + +There is no specific limit to the length of a marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\this_is_an_extremely_long_marker value\n') + >>> list(f.fields()) + [('this_is_an_extremely_long_marker', 'value')] + +A marker can contain any non white space character: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789 value\n') + >>> list(f.fields()) + [('`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789', 'value')] + +A marker is terminated by any white space character: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n\\mk\tanother one\n\\mk\rthird one\n\\mk\ffourth one') + >>> list(f.fields()) + [('mk', 'a value'), ('mk', 'another one'), ('mk', 'third one'), ('mk', 'fourth one')] + +Consecutive whitespace characters (except newline) are treated the same as one: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk \t\r\fa value\n') + >>> list(f.fields()) + [('mk', 'a value')] + +----------------------- +``toolbox.ToolboxData`` +----------------------- + + >>> db = toolbox.ToolboxData() + +``toolbox.ToolboxData.parse()`` +------------------------------- +check that normal parsing works: + + >>> from xml.etree import ElementTree + >>> td = toolbox.ToolboxData() + >>> s = """\\_sh v3.0 400 Rotokas Dictionary + ... \\_DateStampHasFourDigitYear + ... + ... \\lx kaa + ... \\ps V.A + ... \\ge gag + ... \\gp nek i pas + ... + ... \\lx kaa + ... \\ps V.B + ... \\ge strangle + ... \\gp pasim nek + ... 
""" + >>> td.open_string(s) + >>> tree = td.parse(key='lx') + >>> tree.tag + 'toolbox_data' + >>> ElementTree.tostring(list(tree)[0]).decode('utf8') + '

<_sh>v3.0 400 Rotokas Dictionary<_DateStampHasFourDigitYear />
' + >>> ElementTree.tostring(list(tree)[1]).decode('utf8') + 'kaaV.Agagnek i pas' + >>> ElementTree.tostring(list(tree)[2]).decode('utf8') + 'kaaV.Bstranglepasim nek' + +check that guessing the key marker works: + + >>> from xml.etree import ElementTree + >>> td = toolbox.ToolboxData() + >>> s = """\\_sh v3.0 400 Rotokas Dictionary + ... \\_DateStampHasFourDigitYear + ... + ... \\lx kaa + ... \\ps V.A + ... \\ge gag + ... \\gp nek i pas + ... + ... \\lx kaa + ... \\ps V.B + ... \\ge strangle + ... \\gp pasim nek + ... """ + >>> td.open_string(s) + >>> tree = td.parse() + >>> ElementTree.tostring(list(tree)[0]).decode('utf8') + '
<_sh>v3.0 400 Rotokas Dictionary<_DateStampHasFourDigitYear />
' + >>> ElementTree.tostring(list(tree)[1]).decode('utf8') + 'kaaV.Agagnek i pas' + >>> ElementTree.tostring(list(tree)[2]).decode('utf8') + 'kaaV.Bstranglepasim nek' + +----------------------- +``toolbox`` functions +----------------------- + +``toolbox.to_sfm_string()`` +------------------------------- diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..206209dfe63eebdcfb30a0c214d7ea95cb0da1f5 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_pl196x.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b8bfc1acb3fff3fd7af5a1da99416f04743d7e8 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/__pycache__/test_ribes.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py new file mode 100644 index 0000000000000000000000000000000000000000..d8481ab0e70a50f4840110aaa8cedcf314294a8a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_cfg2chomsky.py @@ -0,0 +1,49 @@ +import unittest + +import nltk +from nltk.grammar import CFG + + +class ChomskyNormalFormForCFGTest(unittest.TestCase): + def test_simple(self): + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N | NP PP P + VP -> V NP | VP PP + VP -> Det + Det -> 'a' | 'the' + N -> 'dog' | 'cat' + V -> 'chased' | 'sat' + P -> 'on' | 'in' + """ + ) + 
self.assertFalse(grammar.is_flexible_chomsky_normal_form()) + self.assertFalse(grammar.is_chomsky_normal_form()) + grammar = grammar.chomsky_normal_form(flexible=True) + self.assertTrue(grammar.is_flexible_chomsky_normal_form()) + self.assertFalse(grammar.is_chomsky_normal_form()) + + grammar2 = CFG.fromstring( + """ + S -> NP VP + NP -> VP N P + VP -> P + N -> 'dog' | 'cat' + P -> 'on' | 'in' + """ + ) + self.assertFalse(grammar2.is_flexible_chomsky_normal_form()) + self.assertFalse(grammar2.is_chomsky_normal_form()) + grammar2 = grammar2.chomsky_normal_form() + self.assertTrue(grammar2.is_flexible_chomsky_normal_form()) + self.assertTrue(grammar2.is_chomsky_normal_form()) + + def test_complex(self): + grammar = nltk.data.load("grammars/large_grammars/atis.cfg") + self.assertFalse(grammar.is_flexible_chomsky_normal_form()) + self.assertFalse(grammar.is_chomsky_normal_form()) + grammar = grammar.chomsky_normal_form(flexible=True) + self.assertTrue(grammar.is_flexible_chomsky_normal_form()) + self.assertFalse(grammar.is_chomsky_normal_form()) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py new file mode 100644 index 0000000000000000000000000000000000000000..23115bfac4bb7f661551f192d9e0ec6ce5fc699f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_concordance.py @@ -0,0 +1,98 @@ +import contextlib +import sys +import unittest +from io import StringIO + +from nltk.corpus import gutenberg +from nltk.text import Text + + +@contextlib.contextmanager +def stdout_redirect(where): + sys.stdout = where + try: + yield where + finally: + sys.stdout = sys.__stdout__ + + +class TestConcordance(unittest.TestCase): + """Text constructed using: https://www.nltk.org/book/ch01.html""" + + @classmethod + def setUpClass(cls): + cls.corpus = gutenberg.words("melville-moby_dick.txt") + + @classmethod + def tearDownClass(cls): + pass + + 
def setUp(self): + self.text = Text(TestConcordance.corpus) + self.query = "monstrous" + self.maxDiff = None + self.list_out = [ + "ong the former , one was of a most monstrous size . ... This came towards us , ", + 'ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r', + "ll over with a heathenish array of monstrous clubs and spears . Some were thick", + "d as you gazed , and wondered what monstrous cannibal and savage could ever hav", + "that has survived the flood ; most monstrous and most mountainous ! That Himmal", + "they might scout at Moby Dick as a monstrous fable , or still worse and more de", + "th of Radney .'\" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l", + "ing Scenes . In connexion with the monstrous pictures of whales , I am strongly", + "ere to enter upon those still more monstrous stories of them which are to be fo", + "ght have been rummaged out of this monstrous cabinet there is no telling . But ", + "of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u", + ] + + def tearDown(self): + pass + + def test_concordance_list(self): + concordance_out = self.text.concordance_list(self.query) + self.assertEqual(self.list_out, [c.line for c in concordance_out]) + + def test_concordance_width(self): + list_out = [ + "monstrous", + "monstrous", + "monstrous", + "monstrous", + "monstrous", + "monstrous", + "Monstrous", + "monstrous", + "monstrous", + "monstrous", + "monstrous", + ] + + concordance_out = self.text.concordance_list(self.query, width=0) + self.assertEqual(list_out, [c.query for c in concordance_out]) + + def test_concordance_lines(self): + concordance_out = self.text.concordance_list(self.query, lines=3) + self.assertEqual(self.list_out[:3], [c.line for c in concordance_out]) + + def test_concordance_print(self): + print_out = """Displaying 11 of 11 matches: + ong the former , one was of a most monstrous size . ... This came towards us , + ON OF THE PSALMS . 
" Touching that monstrous bulk of the whale or ork we have r + ll over with a heathenish array of monstrous clubs and spears . Some were thick + d as you gazed , and wondered what monstrous cannibal and savage could ever hav + that has survived the flood ; most monstrous and most mountainous ! That Himmal + they might scout at Moby Dick as a monstrous fable , or still worse and more de + th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l + ing Scenes . In connexion with the monstrous pictures of whales , I am strongly + ere to enter upon those still more monstrous stories of them which are to be fo + ght have been rummaged out of this monstrous cabinet there is no telling . But + of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u + """ + + with stdout_redirect(StringIO()) as stdout: + self.text.concordance(self.query) + + def strip_space(raw_str): + return raw_str.replace(" ", "") + + self.assertEqual(strip_space(print_out), strip_space(stdout.getvalue())) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py new file mode 100644 index 0000000000000000000000000000000000000000..c6ae7bfba1eb98f15bed8ed424d5f9b26d37f6e1 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_corpus_views.py @@ -0,0 +1,48 @@ +""" +Corpus View Regression Tests +""" + +import unittest + +import nltk.data +from nltk.corpus.reader.util import ( + StreamBackedCorpusView, + read_line_block, + read_whitespace_block, +) + + +class TestCorpusViews(unittest.TestCase): + linetok = nltk.LineTokenizer(blanklines="keep") + names = [ + "corpora/inaugural/README", # A very short file (160 chars) + "corpora/inaugural/1793-Washington.txt", # A relatively short file (791 chars) + "corpora/inaugural/1909-Taft.txt", # A longer file (32k chars) + ] + + def data(self): + for name in self.names: + f = 
nltk.data.find(name) + with f.open() as fp: + file_data = fp.read().decode("utf8") + yield f, file_data + + def test_correct_values(self): + # Check that corpus views produce the correct sequence of values. + + for f, file_data in self.data(): + v = StreamBackedCorpusView(f, read_whitespace_block) + self.assertEqual(list(v), file_data.split()) + + v = StreamBackedCorpusView(f, read_line_block) + self.assertEqual(list(v), self.linetok.tokenize(file_data)) + + def test_correct_length(self): + # Check that the corpus views report the correct lengths: + + for f, file_data in self.data(): + v = StreamBackedCorpusView(f, read_whitespace_block) + self.assertEqual(len(v), len(file_data.split())) + + v = StreamBackedCorpusView(f, read_line_block) + self.assertEqual(len(v), len(self.linetok.tokenize(file_data))) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py new file mode 100644 index 0000000000000000000000000000000000000000..2bac342f5e77d428e83119a09d68dd7ef884ddb7 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_disagreement.py @@ -0,0 +1,160 @@ +import unittest + +from nltk.metrics.agreement import AnnotationTask + + +class TestDisagreement(unittest.TestCase): + """ + Class containing unit tests for nltk.metrics.agreement.Disagreement. + """ + + def test_easy(self): + """ + Simple test, based on + https://github.com/foolswood/krippendorffs_alpha/raw/master/krippendorff.pdf. + """ + data = [ + ("coder1", "dress1", "YES"), + ("coder2", "dress1", "NO"), + ("coder3", "dress1", "NO"), + ("coder1", "dress2", "YES"), + ("coder2", "dress2", "NO"), + ("coder3", "dress3", "NO"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) + + def test_easy2(self): + """ + Same simple test with 1 rating removed. 
+ Removal of that rating should not matter: K-Apha ignores items with + only 1 rating. + """ + data = [ + ("coder1", "dress1", "YES"), + ("coder2", "dress1", "NO"), + ("coder3", "dress1", "NO"), + ("coder1", "dress2", "YES"), + ("coder2", "dress2", "NO"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), -0.3333333) + + def test_easy3(self): + """ + If expected disagreement is 0, K-Apha should be 1. + """ + data = [ + ("coder1", "1", 1), + ("coder2", "1", 1), + ("coder1", "2", 2), + ("coder2", "2", 2), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 1.0) + + data = [("coder1", "1", 1), ("coder2", "1", 1), ("coder1", "2", 2)] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 1.0) + + def test_advanced(self): + """ + More advanced test, based on + http://www.agreestat.com/research_papers/onkrippendorffalpha.pdf + """ + data = [ + ("A", "1", "1"), + ("B", "1", "1"), + ("D", "1", "1"), + ("A", "2", "2"), + ("B", "2", "2"), + ("C", "2", "3"), + ("D", "2", "2"), + ("A", "3", "3"), + ("B", "3", "3"), + ("C", "3", "3"), + ("D", "3", "3"), + ("A", "4", "3"), + ("B", "4", "3"), + ("C", "4", "3"), + ("D", "4", "3"), + ("A", "5", "2"), + ("B", "5", "2"), + ("C", "5", "2"), + ("D", "5", "2"), + ("A", "6", "1"), + ("B", "6", "2"), + ("C", "6", "3"), + ("D", "6", "4"), + ("A", "7", "4"), + ("B", "7", "4"), + ("C", "7", "4"), + ("D", "7", "4"), + ("A", "8", "1"), + ("B", "8", "1"), + ("C", "8", "2"), + ("D", "8", "1"), + ("A", "9", "2"), + ("B", "9", "2"), + ("C", "9", "2"), + ("D", "9", "2"), + ("B", "10", "5"), + ("C", "10", "5"), + ("D", "10", "5"), + ("C", "11", "1"), + ("D", "11", "1"), + ("C", "12", "3"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632) + + def test_advanced2(self): + """ + Same more advanced example, but with 1 rating removed. 
+ Again, removal of that 1 rating should not matter. + """ + data = [ + ("A", "1", "1"), + ("B", "1", "1"), + ("D", "1", "1"), + ("A", "2", "2"), + ("B", "2", "2"), + ("C", "2", "3"), + ("D", "2", "2"), + ("A", "3", "3"), + ("B", "3", "3"), + ("C", "3", "3"), + ("D", "3", "3"), + ("A", "4", "3"), + ("B", "4", "3"), + ("C", "4", "3"), + ("D", "4", "3"), + ("A", "5", "2"), + ("B", "5", "2"), + ("C", "5", "2"), + ("D", "5", "2"), + ("A", "6", "1"), + ("B", "6", "2"), + ("C", "6", "3"), + ("D", "6", "4"), + ("A", "7", "4"), + ("B", "7", "4"), + ("C", "7", "4"), + ("D", "7", "4"), + ("A", "8", "1"), + ("B", "8", "1"), + ("C", "8", "2"), + ("D", "8", "1"), + ("A", "9", "2"), + ("B", "9", "2"), + ("C", "9", "2"), + ("D", "9", "2"), + ("B", "10", "5"), + ("C", "10", "5"), + ("D", "10", "5"), + ("C", "11", "1"), + ("D", "11", "1"), + ("C", "12", "3"), + ] + annotation_task = AnnotationTask(data) + self.assertAlmostEqual(annotation_task.alpha(), 0.743421052632) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_distance.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..96d814d0b830c4d6b5196eb384447a5222ea36a5 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_distance.py @@ -0,0 +1,129 @@ +from typing import Tuple + +import pytest + +from nltk.metrics.distance import edit_distance + + +class TestEditDistance: + @pytest.mark.parametrize( + "left,right,substitution_cost,expecteds", + [ + # Allowing transpositions reduces the number of edits required. + # with transpositions: + # e.g. "abc" -T-> "cba" -D-> "ca": 2 steps + # + # without transpositions: + # e.g. "abc" -D-> "ab" -D-> "a" -I-> "ca": 3 steps + ("abc", "ca", 1, (2, 3)), + ("abc", "ca", 5, (2, 3)), # Doesn't *require* substitutions + # Note, a substition_cost of higher than 2 doesn't make much + # sense, as a deletion + insertion is identical, and always + # costs 2. 
+ # + # + # Transpositions don't always reduce the number of edits required: + # with or without transpositions: + # e.g. "wants" -D-> "wats" -D-> "was" -I-> "wasp": 3 steps + ("wants", "wasp", 1, (3, 3)), + ("wants", "wasp", 5, (3, 3)), # Doesn't *require* substitutions + # + # + # Ought to have the same results with and without transpositions + # with or without transpositions: + # e.g. "rain" -S-> "sain" -S-> "shin" -I-> "shine": 3 steps + # (but cost 5 if substitution_cost=2) + ("rain", "shine", 1, (3, 3)), + ("rain", "shine", 2, (5, 5)), # Does *require* substitutions + # + # + # Several potentially interesting typos + # with transpositions: + # e.g. "acbdef" -T-> "abcdef": 1 step + # + # without transpositions: + # e.g. "acbdef" -D-> "abdef" -I-> "abcdef": 2 steps + ("acbdef", "abcdef", 1, (1, 2)), + ("acbdef", "abcdef", 2, (1, 2)), # Doesn't *require* substitutions + # + # + # with transpositions: + # e.g. "lnaguaeg" -T-> "languaeg" -T-> "language": 2 steps + # + # without transpositions: + # e.g. "lnaguaeg" -D-> "laguaeg" -I-> "languaeg" -D-> "languag" -I-> "language": 4 steps + ("lnaguaeg", "language", 1, (2, 4)), + ("lnaguaeg", "language", 2, (2, 4)), # Doesn't *require* substitutions + # + # + # with transpositions: + # e.g. "lnaugage" -T-> "lanugage" -T-> "language": 2 steps + # + # without transpositions: + # e.g. "lnaugage" -S-> "lnangage" -D-> "langage" -I-> "language": 3 steps + # (but one substitution, so a cost of 4 if substition_cost = 2) + ("lnaugage", "language", 1, (2, 3)), + ("lnaugage", "language", 2, (2, 4)), + # Does *require* substitutions if no transpositions + # + # + # with transpositions: + # e.g. "lngauage" -T-> "lnaguage" -T-> "language": 2 steps + # without transpositions: + # e.g. "lngauage" -I-> "lanaguage" -D-> "language": 2 steps + ("lngauage", "language", 1, (2, 2)), + ("lngauage", "language", 2, (2, 2)), # Doesn't *require* substitutions + # + # + # with or without transpositions: + # e.g. 
"wants" -S-> "sants" -S-> "swnts" -S-> "swits" -S-> "swims" -D-> "swim": 5 steps + # + # with substitution_cost=2 and transpositions: + # e.g. "wants" -T-> "santw" -D-> "sntw" -D-> "stw" -D-> "sw" + # -I-> "swi" -I-> "swim": 6 steps + # + # with substitution_cost=2 and no transpositions: + # e.g. "wants" -I-> "swants" -D-> "swant" -D-> "swan" -D-> "swa" -D-> "sw" + # -I-> "swi" -I-> "swim": 7 steps + ("wants", "swim", 1, (5, 5)), + ("wants", "swim", 2, (6, 7)), + # + # + # with or without transpositions: + # e.g. "kitten" -S-> "sitten" -s-> "sittin" -I-> "sitting": 3 steps + # (but cost 5 if substitution_cost=2) + ("kitten", "sitting", 1, (3, 3)), + ("kitten", "sitting", 2, (5, 5)), + # + # duplicated letter + # e.g. "duplicated" -D-> "duplicated" + ("duplicated", "duuplicated", 1, (1, 1)), + ("duplicated", "duuplicated", 2, (1, 1)), + ("very duplicated", "very duuplicateed", 2, (2, 2)), + ], + ) + def test_with_transpositions( + self, left: str, right: str, substitution_cost: int, expecteds: Tuple[int, int] + ): + """ + Test `edit_distance` between two strings, given some `substitution_cost`, + and whether transpositions are allowed. + + :param str left: First input string to `edit_distance`. + :param str right: Second input string to `edit_distance`. + :param int substitution_cost: The cost of a substitution action in `edit_distance`. + :param Tuple[int, int] expecteds: A tuple of expected outputs, such that `expecteds[0]` is + the expected output with `transpositions=True`, and `expecteds[1]` is + the expected output with `transpositions=False`. 
+ """ + # Test the input strings in both orderings + for s1, s2 in ((left, right), (right, left)): + # zip with [True, False] to get the transpositions value + for expected, transpositions in zip(expecteds, [True, False]): + predicted = edit_distance( + s1, + s2, + substitution_cost=substitution_cost, + transpositions=transpositions, + ) + assert predicted == expected diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..479e131c47509bb96efca3c43d8f80ea95a8286d --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_metrics.py @@ -0,0 +1,66 @@ +import unittest + +from nltk.metrics import ( + BigramAssocMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) + +## Test the likelihood ratio metric + +_DELTA = 1e-8 + + +class TestLikelihoodRatio(unittest.TestCase): + def test_lr_bigram(self): + self.assertAlmostEqual( + BigramAssocMeasures.likelihood_ratio(2, (4, 4), 20), + 2.4142743368419755, + delta=_DELTA, + ) + self.assertAlmostEqual( + BigramAssocMeasures.likelihood_ratio(1, (1, 1), 1), 0.0, delta=_DELTA + ) + self.assertRaises( + ValueError, + BigramAssocMeasures.likelihood_ratio, + *(0, (2, 2), 2), + ) + + def test_lr_trigram(self): + self.assertAlmostEqual( + TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 2), + 5.545177444479562, + delta=_DELTA, + ) + self.assertAlmostEqual( + TrigramAssocMeasures.likelihood_ratio(1, (1, 1, 1), (1, 1, 1), 1), + 0.0, + delta=_DELTA, + ) + self.assertRaises( + ValueError, + TrigramAssocMeasures.likelihood_ratio, + *(1, (1, 1, 2), (1, 1, 2), 2), + ) + + def test_lr_quadgram(self): + self.assertAlmostEqual( + QuadgramAssocMeasures.likelihood_ratio( + 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 2 + ), + 8.317766166719343, + delta=_DELTA, + ) + self.assertAlmostEqual( + 
QuadgramAssocMeasures.likelihood_ratio( + 1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 1), (1, 1, 1, 1), 1 + ), + 0.0, + delta=_DELTA, + ) + self.assertRaises( + ValueError, + QuadgramAssocMeasures.likelihood_ratio, + *(1, (1, 1, 1, 1), (1, 1, 1, 1, 1, 2), (1, 1, 1, 1), 1), + ) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py new file mode 100644 index 0000000000000000000000000000000000000000..0a573ea7e291c46e22ceb9ae39fc4aff880c6398 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_rte_classify.py @@ -0,0 +1,94 @@ +import pytest + +from nltk import config_megam +from nltk.classify.rte_classify import RTEFeatureExtractor, rte_classifier, rte_features +from nltk.corpus import rte as rte_corpus + +expected_from_rte_feature_extration = """ +alwayson => True +ne_hyp_extra => 0 +ne_overlap => 1 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 3 +word_overlap => 3 + +alwayson => True +ne_hyp_extra => 0 +ne_overlap => 1 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 2 +word_overlap => 1 + +alwayson => True +ne_hyp_extra => 1 +ne_overlap => 1 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 1 +word_overlap => 2 + +alwayson => True +ne_hyp_extra => 1 +ne_overlap => 0 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 6 +word_overlap => 2 + +alwayson => True +ne_hyp_extra => 1 +ne_overlap => 0 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 4 +word_overlap => 0 + +alwayson => True +ne_hyp_extra => 1 +ne_overlap => 0 +neg_hyp => 0 +neg_txt => 0 +word_hyp_extra => 3 +word_overlap => 1 +""" + + +class TestRTEClassifier: + # Test the feature extraction method. 
+ def test_rte_feature_extraction(self): + pairs = rte_corpus.pairs(["rte1_dev.xml"])[:6] + test_output = [ + f"{key:<15} => {rte_features(pair)[key]}" + for pair in pairs + for key in sorted(rte_features(pair)) + ] + expected_output = expected_from_rte_feature_extration.strip().split("\n") + # Remove null strings. + expected_output = list(filter(None, expected_output)) + assert test_output == expected_output + + # Test the RTEFeatureExtractor object. + def test_feature_extractor_object(self): + rtepair = rte_corpus.pairs(["rte3_dev.xml"])[33] + extractor = RTEFeatureExtractor(rtepair) + + assert extractor.hyp_words == {"member", "China", "SCO."} + assert extractor.overlap("word") == set() + assert extractor.overlap("ne") == {"China"} + assert extractor.hyp_extra("word") == {"member"} + + # Test the RTE classifier training. + def test_rte_classification_without_megam(self): + # Use a sample size for unit testing, since we + # don't need to fully train these classifiers + clf = rte_classifier("IIS", sample_N=100) + clf = rte_classifier("GIS", sample_N=100) + + def test_rte_classification_with_megam(self): + try: + config_megam() + except (LookupError, AttributeError) as e: + pytest.skip("Skipping tests with dependencies on MEGAM") + clf = rte_classifier("megam", sample_N=100) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tag.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..6be90e88e1671074db2503376b8a291f5d45007f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tag.py @@ -0,0 +1,23 @@ +def test_basic(): + from nltk.tag import pos_tag + from nltk.tokenize import word_tokenize + + result = pos_tag(word_tokenize("John's big idea isn't all that bad.")) + assert result == [ + ("John", "NNP"), + ("'s", "POS"), + ("big", "JJ"), + ("idea", "NN"), + ("is", "VBZ"), + ("n't", "RB"), + ("all", "PDT"), + ("that", "DT"), + 
("bad", "JJ"), + (".", "."), + ] + + +def setup_module(module): + import pytest + + pytest.importorskip("numpy") diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py new file mode 100644 index 0000000000000000000000000000000000000000..e564d492e334965426cc97fada992c0843c9b12b --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_tgrep.py @@ -0,0 +1,779 @@ +#!/usr/bin/env python +# +# Natural Language Toolkit: TGrep search +# +# Copyright (C) 2001-2024 NLTK Project +# Author: Will Roberts +# URL: +# For license information, see LICENSE.TXT + +""" +Unit tests for nltk.tgrep. +""" + + +import unittest + +from nltk import tgrep +from nltk.tree import ParentedTree + + +class TestSequenceFunctions(unittest.TestCase): + """ + Class containing unit tests for nltk.tgrep. + """ + + def test_tokenize_simple(self): + """ + Simple test of tokenization. + """ + tokens = tgrep.tgrep_tokenize("A .. (B !< C . D) | ![<< (E , F) $ G]") + self.assertEqual( + tokens, + [ + "A", + "..", + "(", + "B", + "!", + "<", + "C", + ".", + "D", + ")", + "|", + "!", + "[", + "<<", + "(", + "E", + ",", + "F", + ")", + "$", + "G", + "]", + ], + ) + + def test_tokenize_encoding(self): + """ + Test that tokenization handles bytes and strs the same way. + """ + self.assertEqual( + tgrep.tgrep_tokenize(b"A .. (B !< C . D) | ![<< (E , F) $ G]"), + tgrep.tgrep_tokenize("A .. (B !< C . D) | ![<< (E , F) $ G]"), + ) + + def test_tokenize_link_types(self): + """ + Test tokenization of basic link types. 
+ """ + self.assertEqual(tgrep.tgrep_tokenize("AB"), ["A", ">", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<3B"), ["A", "<3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>3B"), ["A", ">3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<,B"), ["A", "<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>,B"), ["A", ">,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<-3B"), ["A", "<-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>-3B"), ["A", ">-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<-B"), ["A", "<-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>-B"), ["A", ">-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<'B"), ["A", "<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>'B"), ["A", ">'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<:B"), ["A", "<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>:B"), ["A", ">:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<>B"), ["A", ">>", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<,B"), ["A", "<<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>,B"), ["A", ">>,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<'B"), ["A", "<<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>'B"), ["A", ">>'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A<<:B"), ["A", "<<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A>>:B"), ["A", ">>:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A.B"), ["A", ".", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A,B"), ["A", ",", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A..B"), ["A", "..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A,,B"), ["A", ",,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$B"), ["A", "$", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$.B"), ["A", "$.", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$,B"), ["A", "$,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$..B"), ["A", "$..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A$,,B"), ["A", "$,,", "B"]) + 
self.assertEqual(tgrep.tgrep_tokenize("A!B"), ["A", "!", ">", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<3B"), ["A", "!", "<3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>3B"), ["A", "!", ">3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<,B"), ["A", "!", "<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>,B"), ["A", "!", ">,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<-3B"), ["A", "!", "<-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>-3B"), ["A", "!", ">-3", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<-B"), ["A", "!", "<-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>-B"), ["A", "!", ">-", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<'B"), ["A", "!", "<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>'B"), ["A", "!", ">'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<:B"), ["A", "!", "<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>:B"), ["A", "!", ">:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<>B"), ["A", "!", ">>", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<,B"), ["A", "!", "<<,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>,B"), ["A", "!", ">>,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<'B"), ["A", "!", "<<'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>'B"), ["A", "!", ">>'", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!<<:B"), ["A", "!", "<<:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!>>:B"), ["A", "!", ">>:", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!.B"), ["A", "!", ".", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!,B"), ["A", "!", ",", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!..B"), ["A", "!", "..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!,,B"), ["A", "!", ",,", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$B"), ["A", "!", "$", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$.B"), ["A", "!", "$.", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$,B"), ["A", "!", "$,", "B"]) + 
self.assertEqual(tgrep.tgrep_tokenize("A!$..B"), ["A", "!", "$..", "B"]) + self.assertEqual(tgrep.tgrep_tokenize("A!$,,B"), ["A", "!", "$,,", "B"]) + + def test_tokenize_examples(self): + """ + Test tokenization of the TGrep2 manual example patterns. + """ + self.assertEqual(tgrep.tgrep_tokenize("NP < PP"), ["NP", "<", "PP"]) + self.assertEqual(tgrep.tgrep_tokenize("/^NP/"), ["/^NP/"]) + self.assertEqual( + tgrep.tgrep_tokenize("NP << PP . VP"), ["NP", "<<", "PP", ".", "VP"] + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP << PP | . VP"), ["NP", "<<", "PP", "|", ".", "VP"] + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP !<< PP [> NP | >> VP]"), + ["NP", "!", "<<", "PP", "[", ">", "NP", "|", ">>", "VP", "]"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP << (PP . VP)"), + ["NP", "<<", "(", "PP", ".", "VP", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("NP <' (PP <, (IN < on))"), + ["NP", "<'", "(", "PP", "<,", "(", "IN", "<", "on", ")", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < (A < B) < C"), + ["S", "<", "(", "A", "<", "B", ")", "<", "C"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < ((A < B) < C)"), + ["S", "<", "(", "(", "A", "<", "B", ")", "<", "C", ")"], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < (A < B < C)"), + ["S", "<", "(", "A", "<", "B", "<", "C", ")"], + ) + self.assertEqual(tgrep.tgrep_tokenize("A3B"3B"', "<", "C"], + ) + + def test_tokenize_nodenames(self): + """ + Test tokenization of node names. 
+ """ + self.assertEqual(tgrep.tgrep_tokenize("Robert"), ["Robert"]) + self.assertEqual(tgrep.tgrep_tokenize("/^[Bb]ob/"), ["/^[Bb]ob/"]) + self.assertEqual(tgrep.tgrep_tokenize("*"), ["*"]) + self.assertEqual(tgrep.tgrep_tokenize("__"), ["__"]) + # test tokenization of NLTK tree position syntax + self.assertEqual(tgrep.tgrep_tokenize("N()"), ["N(", ")"]) + self.assertEqual(tgrep.tgrep_tokenize("N(0,)"), ["N(", "0", ",", ")"]) + self.assertEqual(tgrep.tgrep_tokenize("N(0,0)"), ["N(", "0", ",", "0", ")"]) + self.assertEqual( + tgrep.tgrep_tokenize("N(0,0,)"), ["N(", "0", ",", "0", ",", ")"] + ) + + def test_tokenize_macros(self): + """ + Test tokenization of macro definitions. + """ + self.assertEqual( + tgrep.tgrep_tokenize( + "@ NP /^NP/;\n@ NN /^NN/;\n@NP [!< NP | < @NN] !$.. @NN" + ), + [ + "@", + "NP", + "/^NP/", + ";", + "@", + "NN", + "/^NN/", + ";", + "@NP", + "[", + "!", + "<", + "NP", + "|", + "<", + "@NN", + "]", + "!", + "$..", + "@NN", + ], + ) + + def test_node_simple(self): + """ + Test a simple use of tgrep for finding nodes matching a given + pattern. + """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("NN", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual( + list(tgrep.tgrep_nodes("NN", [tree])), [[tree[0, 2], tree[2, 1]]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("NN|JJ", [tree])), [[(0, 1), (0, 2), (2, 1)]] + ) + + def test_node_printing(self): + """Test that the tgrep print operator ' is properly ignored.""" + tree = ParentedTree.fromstring("(S (n x) (N x))") + self.assertEqual( + list(tgrep.tgrep_positions("N", [tree])), + list(tgrep.tgrep_positions("'N", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_positions("/[Nn]/", [tree])), + list(tgrep.tgrep_positions("'/[Nn]/", [tree])), + ) + + def test_node_encoding(self): + """ + Test that tgrep search strings handles bytes and strs the same + way. 
+ """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual( + list(tgrep.tgrep_positions(b"NN", [tree])), + list(tgrep.tgrep_positions(b"NN", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_nodes(b"NN", [tree])), + list(tgrep.tgrep_nodes("NN", [tree])), + ) + self.assertEqual( + list(tgrep.tgrep_positions(b"NN|JJ", [tree])), + list(tgrep.tgrep_positions("NN|JJ", [tree])), + ) + + def test_node_nocase(self): + """ + Test selecting nodes using case insensitive node names. + """ + tree = ParentedTree.fromstring("(S (n x) (N x))") + self.assertEqual(list(tgrep.tgrep_positions('"N"', [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions('i@"N"', [tree])), [[(0,), (1,)]]) + + def test_node_quoted(self): + """ + Test selecting nodes using quoted node names. + """ + tree = ParentedTree.fromstring('(N ("N" x) (N" x) ("\\" x))') + self.assertEqual(list(tgrep.tgrep_positions('"N"', [tree])), [[()]]) + self.assertEqual(list(tgrep.tgrep_positions('"\\"N\\""', [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions('"N\\""', [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions('"\\"\\\\\\""', [tree])), [[(2,)]]) + + def test_node_regex(self): + """ + Test regex matching on nodes. + """ + tree = ParentedTree.fromstring("(S (NP-SBJ x) (NP x) (NNP x) (VP x))") + # This is a regular expression that matches any node whose + # name starts with NP, including NP-SBJ: + self.assertEqual(list(tgrep.tgrep_positions("/^NP/", [tree])), [[(0,), (1,)]]) + + def test_node_regex_2(self): + """ + Test regex matching on nodes. 
+ """ + tree = ParentedTree.fromstring("(S (SBJ x) (SBJ1 x) (NP-SBJ x))") + self.assertEqual(list(tgrep.tgrep_positions("/^SBJ/", [tree])), [[(0,), (1,)]]) + # This is a regular expression that matches any node whose + # name includes SBJ, including NP-SBJ: + self.assertEqual( + list(tgrep.tgrep_positions("/SBJ/", [tree])), [[(0,), (1,), (2,)]] + ) + + def test_node_tree_position(self): + """ + Test matching on nodes based on NLTK tree position. + """ + tree = ParentedTree.fromstring("(S (NP-SBJ x) (NP x) (NNP x) (VP x))") + # test all tree positions that are not leaves + leaf_positions = {tree.leaf_treeposition(x) for x in range(len(tree.leaves()))} + tree_positions = [x for x in tree.treepositions() if x not in leaf_positions] + for position in tree_positions: + node_id = f"N{position}" + tgrep_positions = list(tgrep.tgrep_positions(node_id, [tree])) + self.assertEqual(len(tgrep_positions[0]), 1) + self.assertEqual(tgrep_positions[0][0], position) + + def test_node_noleaves(self): + """ + Test node name matching with the search_leaves flag set to False. + """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertEqual( + list(tgrep.tgrep_positions("x", [tree])), [[(0, 0, 0), (1, 0, 0)]] + ) + self.assertEqual(list(tgrep.tgrep_positions("x", [tree], False)), [[]]) + + def tests_rel_dominance(self): + """ + Test matching nodes based on dominance relations. 
+ """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertEqual(list(tgrep.tgrep_positions("* < T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* < T > S", [tree])), [[(0,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !< T", [tree])), + [[(), (0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0)]], + ) + self.assertEqual(list(tgrep.tgrep_positions("* !< T > S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* > A", [tree])), [[(0, 0)]]) + self.assertEqual(list(tgrep.tgrep_positions("* > B", [tree])), [[(1, 0)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !> B", [tree])), + [[(), (0,), (0, 0), (0, 0, 0), (1,), (1, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* !> B >> S", [tree])), [[(0,), (0, 0), (1,)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >> S", [tree])), + [[(0,), (0, 0), (1,), (1, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >>, S", [tree])), [[(0,), (0, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >>' S", [tree])), [[(1,), (1, 0)]] + ) + # Known issue: + # self.assertEqual(list(tgrep.tgrep_positions('* !>> S', [tree])), + # [[()]]) + self.assertEqual(list(tgrep.tgrep_positions("* << T", [tree])), [[(), (0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <<' T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <<1 N", [tree])), [[(1,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !<< T", [tree])), + [[(0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0)]], + ) + tree = ParentedTree.fromstring("(S (A (T x)) (B (T x) (N x )))") + self.assertEqual(list(tgrep.tgrep_positions("* <: T", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* < T", [tree])), [[(0,), (1,)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !<: T", [tree])), + [[(), (0, 0), (0, 0, 0), (1,), (1, 0), (1, 0, 0), (1, 1), (1, 1, 0)]], + ) + self.assertEqual(list(tgrep.tgrep_positions("* !<: T > S", [tree])), 
[[(1,)]]) + tree = ParentedTree.fromstring("(S (T (A x) (B x)) (T (C x)))") + self.assertEqual(list(tgrep.tgrep_positions("* >: T", [tree])), [[(1, 0)]]) + self.assertEqual( + list(tgrep.tgrep_positions("* !>: T", [tree])), + [[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0, 0)]], + ) + tree = ParentedTree.fromstring( + "(S (A (B (C (D (E (T x))))))" " (A (B (C (D (E (T x))) (N x)))))" + ) + self.assertEqual( + list(tgrep.tgrep_positions("* <<: T", [tree])), + [ + [ + (0,), + (0, 0), + (0, 0, 0), + (0, 0, 0, 0), + (0, 0, 0, 0, 0), + (1, 0, 0, 0), + (1, 0, 0, 0, 0), + ] + ], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* >>: A", [tree])), + [ + [ + (0, 0), + (0, 0, 0), + (0, 0, 0, 0), + (0, 0, 0, 0, 0), + (0, 0, 0, 0, 0, 0), + (1, 0), + (1, 0, 0), + ] + ], + ) + + def test_bad_operator(self): + """ + Test error handling of undefined tgrep operators. + """ + tree = ParentedTree.fromstring("(S (A (T x)) (B (N x)))") + self.assertRaises( + tgrep.TgrepException, list, tgrep.tgrep_positions("* >>> S", [tree]) + ) + + def test_comments(self): + """ + Test that comments are correctly filtered out of tgrep search + strings. + """ + tree = ParentedTree.fromstring("(S (NN x) (NP x) (NN x))") + search1 = """ + @ NP /^NP/; + @ NN /^NN/; + @NN + """ + self.assertEqual(list(tgrep.tgrep_positions(search1, [tree])), [[(0,), (2,)]]) + search2 = """ + # macros + @ NP /^NP/; + @ NN /^NN/; + + # search string + @NN + """ + self.assertEqual(list(tgrep.tgrep_positions(search2, [tree])), [[(0,), (2,)]]) + + def test_rel_sister_nodes(self): + """ + Test matching sister nodes in a tree. + """ + tree = ParentedTree.fromstring("(S (A x) (B x) (C x))") + self.assertEqual(list(tgrep.tgrep_positions("* $. B", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $.. 
B", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $, B", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $,, B", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* $ B", [tree])), [[(0,), (2,)]]) + + def tests_rel_indexed_children(self): + """ + Test matching nodes based on their index in their parent node. + """ + tree = ParentedTree.fromstring("(S (A x) (B x) (C x))") + self.assertEqual(list(tgrep.tgrep_positions("* >, S", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >1 S", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >2 S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >3 S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >' S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-1 S", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-2 S", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* >-3 S", [tree])), [[(0,)]]) + tree = ParentedTree.fromstring( + "(S (D (A x) (B x) (C x)) (E (B x) (C x) (A x)) " "(F (C x) (A x) (B x)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("* <, A", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <1 A", [tree])), [[(0,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <2 A", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <3 A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <' A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-1 A", [tree])), [[(1,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-2 A", [tree])), [[(2,)]]) + self.assertEqual(list(tgrep.tgrep_positions("* <-3 A", [tree])), [[(0,)]]) + + def test_rel_precedence(self): + """ + Test matching nodes based on precedence relations. 
+ """ + tree = ParentedTree.fromstring( + "(S (NP (NP (PP x)) (NP (AP x)))" + " (VP (AP (X (PP x)) (Y (AP x))))" + " (NP (RC (NP (AP x)))))" + ) + self.assertEqual( + list(tgrep.tgrep_positions("* . X", [tree])), [[(0,), (0, 1), (0, 1, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* . Y", [tree])), [[(1, 0, 0), (1, 0, 0, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* .. X", [tree])), + [[(0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* .. Y", [tree])), + [[(0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1, 0, 0), (1, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* , X", [tree])), [[(1, 0, 1), (1, 0, 1, 0)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("* , Y", [tree])), + [[(2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* ,, X", [tree])), + [[(1, 0, 1), (1, 0, 1, 0), (2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + self.assertEqual( + list(tgrep.tgrep_positions("* ,, Y", [tree])), + [[(2,), (2, 0), (2, 0, 0), (2, 0, 0, 0)]], + ) + + def test_examples(self): + """ + Test the Basic Examples from the TGrep2 manual. + """ + tree = ParentedTree.fromstring("(S (NP (AP x)) (NP (PP x)))") + # This matches any NP node that immediately dominates a PP: + self.assertEqual(list(tgrep.tgrep_positions("NP < PP", [tree])), [[(1,)]]) + + tree = ParentedTree.fromstring("(S (NP x) (VP x) (NP (PP x)) (VP x))") + # This matches an NP that dominates a PP and is immediately + # followed by a VP: + self.assertEqual(list(tgrep.tgrep_positions("NP << PP . VP", [tree])), [[(2,)]]) + + tree = ParentedTree.fromstring( + "(S (NP (AP x)) (NP (PP x)) " "(NP (DET x) (NN x)) (VP x))" + ) + # This matches an NP that dominates a PP or is immediately + # followed by a VP: + self.assertEqual( + list(tgrep.tgrep_positions("NP << PP | . 
VP", [tree])), [[(1,), (2,)]] + ) + + tree = ParentedTree.fromstring( + "(S (NP (NP (PP x)) (NP (AP x)))" + " (VP (AP (NP (PP x)) (NP (AP x))))" + " (NP (RC (NP (AP x)))))" + ) + # This matches an NP that does not dominate a PP. Also, the NP + # must either have a parent that is an NP or be dominated by a + # VP: + self.assertEqual( + list(tgrep.tgrep_positions("NP !<< PP [> NP | >> VP]", [tree])), + [[(0, 1), (1, 0, 1)]], + ) + + tree = ParentedTree.fromstring( + "(S (NP (AP (PP x) (VP x))) " "(NP (AP (PP x) (NP x))) (NP x))" + ) + # This matches an NP that dominates a PP which itself is + # immediately followed by a VP. Note the use of parentheses to + # group ". VP" with the PP rather than with the NP: + self.assertEqual( + list(tgrep.tgrep_positions("NP << (PP . VP)", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (NP (DET a) (NN cat) (PP (IN on) (NP x)))" + " (NP (DET a) (NN cat) (PP (IN on) (NP x)) (PP x))" + " (NP x))" + ) + # This matches an NP whose last child is a PP that begins with + # the preposition "on": + self.assertEqual( + list(tgrep.tgrep_positions("NP <' (PP <, (IN < on))", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (S (C x) (A (B x))) (S (C x) (A x)) " "(S (D x) (A (B x))))" + ) + # The following pattern matches an S which has a child A and + # another child that is a C and that the A has a child B: + self.assertEqual( + list(tgrep.tgrep_positions("S < (A < B) < C", [tree])), [[(0,)]] + ) + + tree = ParentedTree.fromstring( + "(S (S (A (B x) (C x))) (S (S (C x) (A (B x)))))" + ) + # However, this pattern means that S has child A and that A + # has children B and C: + self.assertEqual( + list(tgrep.tgrep_positions("S < ((A < B) < C)", [tree])), [[(0,)]] + ) + + # It is equivalent to this: + self.assertEqual( + list(tgrep.tgrep_positions("S < (A < B < C)", [tree])), [[(0,)]] + ) + + def test_use_macros(self): + """ + Test defining and using tgrep2 macros. 
+ """ + tree = ParentedTree.fromstring( + "(VP (VB sold) (NP (DET the) " + "(NN heiress)) (NP (NN deed) (PREP to) " + "(NP (DET the) (NN school) (NN house))))" + ) + self.assertEqual( + list( + tgrep.tgrep_positions( + "@ NP /^NP/;\n@ NN /^NN/;\n@NP !< @NP !$.. @NN", [tree] + ) + ), + [[(1,), (2, 2)]], + ) + # use undefined macro @CNP + self.assertRaises( + tgrep.TgrepException, + list, + tgrep.tgrep_positions( + "@ NP /^NP/;\n@ NN /^NN/;\n@CNP !< @NP !$.. @NN", [tree] + ), + ) + + def test_tokenize_node_labels(self): + """Test tokenization of labeled nodes.""" + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ < (@VP < (@VB $.. @OBJ))"), + [ + "S", + "<", + "@SBJ", + "<", + "(", + "@VP", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ], + ) + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ=s < (@VP=v < (@VB $.. @OBJ))"), + [ + "S", + "<", + "@SBJ", + "=", + "s", + "<", + "(", + "@VP", + "=", + "v", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ], + ) + + def test_tokenize_segmented_patterns(self): + """Test tokenization of segmented patterns.""" + self.assertEqual( + tgrep.tgrep_tokenize("S < @SBJ=s < (@VP=v < (@VB $.. @OBJ)) : =s .. =v"), + [ + "S", + "<", + "@SBJ", + "=", + "s", + "<", + "(", + "@VP", + "=", + "v", + "<", + "(", + "@VB", + "$..", + "@OBJ", + ")", + ")", + ":", + "=s", + "..", + "=v", + ], + ) + + def test_labeled_nodes(self): + """ + Test labeled nodes. + + Test case from Emily M. Bender. + """ + search = """ + # macros + @ SBJ /SBJ/; + @ VP /VP/; + @ VB /VB/; + @ VPoB /V[PB]/; + @ OBJ /OBJ/; + + # 1 svo + S < @SBJ=s < (@VP=v < (@VB $.. @OBJ)) : =s .. =v""" + sent1 = ParentedTree.fromstring( + "(S (NP-SBJ I) (VP (VB eat) (NP-OBJ (NNS apples))))" + ) + sent2 = ParentedTree.fromstring( + "(S (VP (VB eat) (NP-OBJ (NNS apples))) (NP-SBJ I))" + ) + search_firsthalf = search.split("\n\n")[0] + "S < @SBJ < (@VP < (@VB $.. @OBJ))" + search_rewrite = "S < (/.*SBJ/ $.. (/VP/ < (/VB/ $.. 
/.*OBJ/)))" + + self.assertTrue(list(tgrep.tgrep_positions(search_firsthalf, [sent1]))[0]) + self.assertTrue(list(tgrep.tgrep_positions(search, [sent1]))[0]) + self.assertTrue(list(tgrep.tgrep_positions(search_rewrite, [sent1]))[0]) + self.assertEqual( + list(tgrep.tgrep_positions(search, [sent1])), + list(tgrep.tgrep_positions(search_rewrite, [sent1])), + ) + self.assertTrue(list(tgrep.tgrep_positions(search_firsthalf, [sent2]))[0]) + self.assertFalse(list(tgrep.tgrep_positions(search, [sent2]))[0]) + self.assertFalse(list(tgrep.tgrep_positions(search_rewrite, [sent2]))[0]) + self.assertEqual( + list(tgrep.tgrep_positions(search, [sent2])), + list(tgrep.tgrep_positions(search_rewrite, [sent2])), + ) + + def test_multiple_conjs(self): + """ + Test that multiple (3 or more) conjunctions of node relations are + handled properly. + """ + sent = ParentedTree.fromstring("((A (B b) (C c)) (A (B b) (C c) (D d)))") + # search = '(A < B < C < D)' + # search_tworels = '(A < B < C)' + self.assertEqual( + list(tgrep.tgrep_positions("(A < B < C < D)", [sent])), [[(1,)]] + ) + self.assertEqual( + list(tgrep.tgrep_positions("(A < B < C)", [sent])), [[(0,), (1,)]] + ) + + def test_trailing_semicolon(self): + """ + Test that semicolons at the end of a tgrep2 search string won't + cause a parse failure. 
+ """ + tree = ParentedTree.fromstring( + "(S (NP (DT the) (JJ big) (NN dog)) " "(VP bit) (NP (DT a) (NN cat)))" + ) + self.assertEqual(list(tgrep.tgrep_positions("NN", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual(list(tgrep.tgrep_positions("NN;", [tree])), [[(0, 2), (2, 1)]]) + self.assertEqual( + list(tgrep.tgrep_positions("NN;;", [tree])), [[(0, 2), (2, 1)]] + ) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_util.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..4709e843ca56ae33d38849076b8f3a8a39665b2a --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/test_util.py @@ -0,0 +1,82 @@ +import pytest + +from nltk.util import everygrams + + +@pytest.fixture +def everygram_input(): + """Form test data for tests.""" + return iter(["a", "b", "c"]) + + +def test_everygrams_without_padding(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("c",), + ] + output = list(everygrams(everygram_input)) + assert output == expected_output + + +def test_everygrams_max_len(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("b",), + ("b", "c"), + ("c",), + ] + output = list(everygrams(everygram_input, max_len=2)) + assert output == expected_output + + +def test_everygrams_min_len(everygram_input): + expected_output = [ + ("a", "b"), + ("a", "b", "c"), + ("b", "c"), + ] + output = list(everygrams(everygram_input, min_len=2)) + assert output == expected_output + + +def test_everygrams_pad_right(everygram_input): + expected_output = [ + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("b", "c", None), + ("c",), + ("c", None), + ("c", None, None), + (None,), + (None, None), + (None,), + ] + output = list(everygrams(everygram_input, max_len=3, pad_right=True)) + assert output == expected_output + + +def test_everygrams_pad_left(everygram_input): + 
expected_output = [ + (None,), + (None, None), + (None, None, "a"), + (None,), + (None, "a"), + (None, "a", "b"), + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("b",), + ("b", "c"), + ("c",), + ] + output = list(everygrams(everygram_input, max_len=3, pad_left=True)) + assert output == expected_output diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ea5bedbe9e89f39405be852f741f159aa4738e4 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_bleu.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4f44f42dc6d4505dfd452896924d568ca912ba1 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_gdfa.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f353cada5d3934ed7b488aa9afb04ee76de1542 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm1.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..94d3cae01089a433aeb7f76c4b879000ed7fd337 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm3.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eac82b6cc4ff4bb9701fb1a26253adf96b2dae5 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_ibm_model.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c82f9d939cd8d28612399c15e5d921daccfb5063 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_meteor.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc72dce8bf5d7034fc7c36d8fc173d80d3c8eb19 Binary files /dev/null and b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/__pycache__/test_nist.cpython-310.pyc differ diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py new file mode 100644 index 0000000000000000000000000000000000000000..3e69211a273d203f506656675d0e8e68de7575ee --- /dev/null +++ 
b/openflamingo/lib/python3.10/site-packages/nltk/test/unit/translate/test_ibm3.py @@ -0,0 +1,105 @@ +""" +Tests for IBM Model 3 training methods +""" + +import unittest +from collections import defaultdict + +from nltk.translate import AlignedSent, IBMModel, IBMModel3 +from nltk.translate.ibm_model import AlignmentInfo + + +class TestIBMModel3(unittest.TestCase): + def test_set_uniform_distortion_probabilities(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model3 = IBMModel3(corpus, 0) + + # act + model3.set_uniform_probabilities(corpus) + + # assert + # expected_prob = 1.0 / length of target sentence + self.assertEqual(model3.distortion_table[1][0][3][2], 1.0 / 2) + self.assertEqual(model3.distortion_table[4][2][2][4], 1.0 / 4) + + def test_set_uniform_distortion_probabilities_of_non_domain_values(self): + # arrange + corpus = [ + AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]), + AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]), + ] + model3 = IBMModel3(corpus, 0) + + # act + model3.set_uniform_probabilities(corpus) + + # assert + # examine i and j values that are not in the training data domain + self.assertEqual(model3.distortion_table[0][0][3][2], IBMModel.MIN_PROB) + self.assertEqual(model3.distortion_table[9][2][2][4], IBMModel.MIN_PROB) + self.assertEqual(model3.distortion_table[2][9][2][4], IBMModel.MIN_PROB) + + def test_prob_t_a_given_s(self): + # arrange + src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"] + trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"] + corpus = [AlignedSent(trg_sentence, src_sentence)] + alignment_info = AlignmentInfo( + (0, 1, 4, 0, 2, 5, 5), + [None] + src_sentence, + ["UNUSED"] + trg_sentence, + [[3], [1], [4], [], [2], [5, 6]], + ) + + distortion_table = defaultdict( + lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float))) + ) + 
distortion_table[1][1][5][6] = 0.97 # i -> ich + distortion_table[2][4][5][6] = 0.97 # love -> gern + distortion_table[3][0][5][6] = 0.97 # to -> NULL + distortion_table[4][2][5][6] = 0.97 # eat -> esse + distortion_table[5][5][5][6] = 0.97 # smoked -> räucherschinken + distortion_table[6][5][5][6] = 0.97 # ham -> räucherschinken + + translation_table = defaultdict(lambda: defaultdict(float)) + translation_table["i"]["ich"] = 0.98 + translation_table["love"]["gern"] = 0.98 + translation_table["to"][None] = 0.98 + translation_table["eat"]["esse"] = 0.98 + translation_table["smoked"]["räucherschinken"] = 0.98 + translation_table["ham"]["räucherschinken"] = 0.98 + + fertility_table = defaultdict(lambda: defaultdict(float)) + fertility_table[1]["ich"] = 0.99 + fertility_table[1]["esse"] = 0.99 + fertility_table[0]["ja"] = 0.99 + fertility_table[1]["gern"] = 0.99 + fertility_table[2]["räucherschinken"] = 0.999 + fertility_table[1][None] = 0.99 + + probabilities = { + "p1": 0.167, + "translation_table": translation_table, + "distortion_table": distortion_table, + "fertility_table": fertility_table, + "alignment_table": None, + } + + model3 = IBMModel3(corpus, 0, probabilities) + + # act + probability = model3.prob_t_a_given_s(alignment_info) + + # assert + null_generation = 5 * pow(0.167, 1) * pow(0.833, 4) + fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999 + lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98 + distortion = 0.97 * 0.97 * 0.97 * 0.97 * 0.97 * 0.97 + expected_probability = ( + null_generation * fertility * lexical_translation * distortion + ) + self.assertEqual(round(probability, 4), round(expected_probability, 4)) diff --git a/openflamingo/lib/python3.10/site-packages/nltk/test/wsd.doctest b/openflamingo/lib/python3.10/site-packages/nltk/test/wsd.doctest new file mode 100644 index 0000000000000000000000000000000000000000..704b068cd0ad31e6ae59d295e8e9f041d895ec93 --- /dev/null +++ 
b/openflamingo/lib/python3.10/site-packages/nltk/test/wsd.doctest @@ -0,0 +1,68 @@ +.. Copyright (C) 2001-2024 NLTK Project +.. For license information, see LICENSE.TXT + +.. -*- coding: utf-8 -*- + +========================= +Word Sense Disambiguation +========================= + + +Lesk Algorithm +-------------- + + +Performs the classic Lesk algorithm for Word Sense Disambiguation (WSD) using +a the definitions of the ambiguous word. + +Given an ambiguous word and the context in which the word occurs, Lesk returns +a Synset with the highest number of overlapping words between the context +sentence and different definitions from each Synset. + + >>> from nltk.wsd import lesk + >>> sent = ['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'] + + >>> print(lesk(sent, 'bank', 'n')) + Synset('savings_bank.n.02') + + >>> print(lesk(sent, 'bank')) + Synset('savings_bank.n.02') + +The definitions for "bank" are: + + >>> from nltk.corpus import wordnet as wn + >>> for ss in wn.synsets('bank'): + ... print(ss, ss.definition()) + ... 
+ Synset('bank.n.01') sloping land (especially the slope beside a body of water) + Synset('depository_financial_institution.n.01') a financial institution that accepts deposits and channels the money into lending activities + Synset('bank.n.03') a long ridge or pile + Synset('bank.n.04') an arrangement of similar objects in a row or in tiers + Synset('bank.n.05') a supply or stock held in reserve for future use (especially in emergencies) + Synset('bank.n.06') the funds held by a gambling house or the dealer in some gambling games + Synset('bank.n.07') a slope in the turn of a road or track; the outside is higher than the inside in order to reduce the effects of centrifugal force + Synset('savings_bank.n.02') a container (usually with a slot in the top) for keeping money at home + Synset('bank.n.09') a building in which the business of banking transacted + Synset('bank.n.10') a flight maneuver; aircraft tips laterally about its longitudinal axis (especially in turning) + Synset('bank.v.01') tip laterally + Synset('bank.v.02') enclose with a bank + Synset('bank.v.03') do business with a bank or keep an account at a bank + Synset('bank.v.04') act as the banker in a game or in gambling + Synset('bank.v.05') be in the banking business + Synset('deposit.v.02') put into a bank account + Synset('bank.v.07') cover with ashes so to control the rate of burning + Synset('trust.v.01') have confidence or faith in + +Test disambiguation of POS tagged `able`. + + >>> [(s, s.pos()) for s in wn.synsets('able')] + [(Synset('able.a.01'), 'a'), (Synset('able.s.02'), 's'), (Synset('able.s.03'), 's'), (Synset('able.s.04'), 's')] + >>> sent = 'people should be able to marry a person of their choice'.split() + >>> lesk(sent, 'able') + Synset('able.s.04') + >>> lesk(sent, 'able', pos='a') + Synset('able.a.01') + +Test behavior if there is are no matching senses. + + >>> lesk('John loves Mary'.split(), 'loves', synsets=[])