diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..6a0f3e0d573ee8f279f98ae10f7b2f68c8187538 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h @@ -0,0 +1,142 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include +#include + +namespace torch::cuda::CUDAPluggableAllocator { + +#if defined(TORCH_HIP_VERSION) +using streamType = c10::hip::HIPStream; +#else +using streamType = c10::cuda::CUDAStream; +#endif + +std::shared_ptr +getCurrentAllocator(); +std::shared_ptr +createCustomAllocator( + std::function alloc_fn, + std::function free_fn); +void changeCurrentAllocator( + const std::shared_ptr& + allocator); + +struct _AllocationMetadata { + _AllocationMetadata(); + _AllocationMetadata(size_t size, int device_idx, cudaStream_t stream); + size_t size; + int device_idx; + cudaStream_t stream; +}; + +struct CUDAPluggableAllocator + : public c10::cuda::CUDACachingAllocator::CUDAAllocator { + CUDAPluggableAllocator( + std::function alloc_fn, + std::function free_fn); + + CUDAPluggableAllocator(CUDAPluggableAllocator& other); + + void set_init_fn(std::function init_fn); + + void set_reset_fn(std::function reset_fn); + + void set_memory_fraction_fn( + std::function memory_fraction_fn); + + void set_base_alloc_fn(std::function base_alloc_fn); + + void set_record_stream_fn( + std::function record_stream_fn); + + void set_begin_allocate_stream_to_pool( + std::function + capture_begin_fn); + + void set_end_allocate_stream_to_pool_fn( + std::function capture_about_to_end_fn); + + void set_release_pool( + std::function capture_destroy_fn); + + void* malloc(size_t size, int device, cudaStream_t stream); + + c10::DataPtr allocate(size_t size) const override; + 
c10::DeleterFnPtr raw_deleter() const override; + + void* raw_alloc(size_t nbytes) override; + void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override; + void raw_delete(void* ptr) override; + void init(int device_count) override; + bool initialized() override; + void setMemoryFraction(double fraction, int device) override; + void emptyCache() override; + void cacheInfo(int dev_id, size_t* largestBlock) override; + void* getBaseAllocation(void* ptr, size_t* size) override; + + void recordStream(const c10::DataPtr&, streamType stream) override; + + c10::cuda::CUDACachingAllocator::DeviceStats getDeviceStats( + int device) override; + void resetAccumulatedStats(int device) override; + void resetPeakStats(int device) override; + c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override; + void beginAllocateStreamToPool( + int device, + cudaStream_t stream, + c10::cuda::MempoolId_t mempool_id) override; + void endAllocateStreamToPool(int device, cudaStream_t stream) override; + void releasePool(int device, c10::cuda::MempoolId_t mempool_id) override; + std::shared_ptr getIpcDevPtr(std::string handle) override; + void recordHistory( + bool enabled, + c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder, + size_t alloc_trace_max_entries, + c10::cuda::CUDACachingAllocator::RecordContext when) override; + void attachOutOfMemoryObserver( + c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override; + void attachAllocatorTraceTracker( + c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override; + std::shared_ptr + getCheckpointState(int device, at::cuda::MempoolId_t id) override; + c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState( + int device, + std::shared_ptr pps) + override; + void enablePeerAccess(int dev, int dev_to_access) override; + cudaError_t memcpyAsync( + void* dst, + int dstDevice, + const void* src, + int srcDevice, + size_t count, + cudaStream_t stream, + bool 
p2p_enabled) override; + std::string name() override; + + protected: + std::function alloc_fn_; + std::function free_fn_; + std::function init_fn_; + std::function reset_fn_; + std::function memory_fraction_fn_; + std::function base_alloc_fn_; + std::function record_stream_fn_; + std::function + begin_allocate_stream_to_pool_fn_; + std::function end_allocate_stream_to_pool_fn_; + std::function relase_pool_fn_; + std::mutex allocator_mutex_; + // We do the bookeeping here in order to simplify custom allocators + std::unordered_map allocation_metadata_; + + bool initialized_ = false; +}; +} // namespace torch::cuda::CUDAPluggableAllocator diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h new file mode 100644 index 0000000000000000000000000000000000000000..5c4d95b285997fa4ddfce57423e0b264dde5898a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h @@ -0,0 +1,18 @@ +#ifndef THCP_EVENT_INC +#define THCP_EVENT_INC + +#include +#include + +struct THCPEvent { + PyObject_HEAD at::cuda::CUDAEvent cuda_event; +}; +extern PyObject* THCPEventClass; + +void THCPEvent_init(PyObject* module); + +inline bool THCPEvent_Check(PyObject* obj) { + return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass); +} + +#endif // THCP_EVENT_INC diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h new file mode 100644 index 0000000000000000000000000000000000000000..23d9079f146f54eb0999715b350b9d22d6652752 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h @@ -0,0 +1,12 @@ +#ifndef THCP_CUDA_MODULE_INC +#define THCP_CUDA_MODULE_INC + +void THCPModule_setDevice(int idx); +PyObject* THCPModule_getDevice_wrap(PyObject* self); +PyObject* 
THCPModule_setDevice_wrap(PyObject* self, PyObject* arg); +PyObject* THCPModule_getDeviceName_wrap(PyObject* self, PyObject* arg); +PyObject* THCPModule_getDriverVersion(PyObject* self); +PyObject* THCPModule_isDriverSufficient(PyObject* self); +PyObject* THCPModule_getCurrentBlasHandle_wrap(PyObject* self); + +#endif diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..6175ac2ea03284b03cff70d77a1c37c7aa91215a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h @@ -0,0 +1,19 @@ +#ifndef THCP_STREAM_INC +#define THCP_STREAM_INC + +#include +#include +#include + +struct THCPStream : THPStream { + at::cuda::CUDAStream cuda_stream; +}; +extern PyObject* THCPStreamClass; + +void THCPStream_init(PyObject* module); + +inline bool THCPStream_Check(PyObject* obj) { + return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass); +} + +#endif // THCP_STREAM_INC diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h new file mode 100644 index 0000000000000000000000000000000000000000..697a66dc3ee91a22ae2503852db04dbba2fc74d4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h @@ -0,0 +1,10 @@ +#ifndef THCP_H +#define THCP_H + +#include +#include +#include +#include +#include + +#endif diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h new file mode 100644 index 0000000000000000000000000000000000000000..cf89b365d0ce4b6af51550825b5a45387eb281f3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h @@ -0,0 +1,52 @@ 
+#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace torch::cuda { + +using tensor_list2d = std::vector>; + +TORCH_CUDA_CU_API std::vector& broadcast_out( + const at::Tensor& tensor, + std::vector& out_tensors); +TORCH_CUDA_CU_API std::vector broadcast( + const at::Tensor& tensor, + at::IntArrayRef devices); +TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced( + at::TensorList tensors, + at::IntArrayRef devices, + size_t buffer_size); + +TORCH_CUDA_CU_API std::vector& scatter_out( + const at::Tensor& tensor, + std::vector& out_tensors, + int64_t dim = 0, + const c10::optional>>& + streams = c10::nullopt); + +TORCH_CUDA_CU_API std::vector scatter( + const at::Tensor& tensor, + at::IntArrayRef devices, + const c10::optional>& chunk_sizes = c10::nullopt, + int64_t dim = 0, + const c10::optional>>& + streams = c10::nullopt); + +TORCH_CUDA_CU_API at::Tensor& gather_out( + at::TensorList tensors, + at::Tensor& out_tensor, + int64_t dim); + +TORCH_CUDA_CU_API at::Tensor gather( + at::TensorList tensors, + int64_t dim, + c10::optional destination_index); + +} // namespace torch::cuda diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h new file mode 100644 index 0000000000000000000000000000000000000000..82fa34294d363f29c8a0d2d4965c0ed4dadf43d7 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h @@ -0,0 +1,10 @@ +#pragma once + +#include + +namespace torch { + +static constexpr size_t MAX_CUDA_DEVICES = 64; +using device_set = std::bitset; + +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h new file mode 100644 index 
0000000000000000000000000000000000000000..dfffbcb5a88e4f698db11acecc4f472a8224d09b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch::cuda { + +// C++-only versions of these, for python use +// those defined in cuda/Module.cpp which also record python state. +TORCH_CUDA_CU_API void _record_memory_history( + bool enabled, + bool record_context = true, + int64_t trace_alloc_max_entries = 1, + bool trace_alloc_record_context = false, + bool record_cpp_context = false); + +TORCH_CUDA_CU_API void _record_memory_history( + c10::optional enabled = "all", + c10::optional context = "all", + std::string stacks = "all", + size_t max_entries = UINT64_MAX); + +TORCH_CUDA_CU_API std::string _memory_snapshot_pickled(); + +} // namespace torch::cuda diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h new file mode 100644 index 0000000000000000000000000000000000000000..14664cc0b9fd155e04006e86af8f0e0c4da181c9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h @@ -0,0 +1,218 @@ +#pragma once + +#include +#include +#include + +#include +#include + +// NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for +// HIP 3.1+ +#if defined(__CUDA_BF16_TYPES_EXIST__) +#define HAS_NCCL_BF16_DATATYPE \ + ((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10)) +#elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301) +#define HAS_NCCL_BF16_DATATYPE 1 +#else +#define HAS_NCCL_BF16_DATATYPE 0 +#endif + +namespace torch::cuda::nccl { + +/* The following are copied from and redefined in torch::cuda::nccl + * namespace */ +/* pytorch should only use the following definition within pytorch scope */ + +/* Opaque handle to communicator to ncclComm*, this will reinterpret as 
ncclComm + * in nccl.cpp */ +typedef void* ncclComm_t; + +/** redefine nccl unique ID in torch scope. this should be identical to native + * nccl impp. */ +#define NCCL_UNIQUE_ID_BYTES 128 +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) +typedef struct { + char internal[NCCL_UNIQUE_ID_BYTES]; +} ncclUniqueId; + +/* Error type */ +enum class ncclResult { + Success = 0, + UnhandledCudaError = 1, + SystemError = 2, + InternalError = 3, + InvalidArgument = 4, + InvalidUsage = 5, + NumResults = 6, + InProgress = 7 +}; + +/* Reduction operation selector */ +enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 }; + +/* Data types */ +enum class ncclDataType { + Int8 = 0, + Char = 0, + Uint8 = 1, + Int32 = 2, + Int = 2, + Uint32 = 3, + Int64 = 4, + Uint64 = 5, + Float16 = 6, + Half = 6, + Float32 = 7, + Float = 7, + Float64 = 8, + Double = 8, + Bfloat16 = 9, + NumTypes = 10 +}; + +// RAII helper class to manage NCCL group API and CUDA free mutex. +// The destructor is allowed to throw since this helper class only +// manages group and lock lifetimes. +struct AutoNcclGroup { + AutoNcclGroup(); + AutoNcclGroup(std::vector& comms, bool comm_nonblocking); + ~AutoNcclGroup() noexcept(false); + std::vector comms_; + bool comm_nonblocking_; +}; + +// NOTE: this is exposed only so that python_nccl.cpp can some of these helpers. +// Don't use them outside of these files. 
+namespace detail { + +TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status); + +static inline void NCCL_CHECK(ncclResult status) { + if (status != ncclResult::Success) { + throw_nccl_error(status); + } +} + +TORCH_CUDA_CPP_API at::ArrayRef get_communicators( + at::TensorList inputs); +TORCH_CUDA_CPP_API void check_inputs( + at::TensorList inputs, + at::TensorList outputs, + int input_multiplier, + int output_multiplier); +TORCH_CUDA_CPP_API void check_inputs( + at::TensorList inputs, + const at::Tensor& output, + int root, + int input_multiplier, + int output_multiplier); + +} // namespace detail + +using comm_list = std::vector; +using stream_list = std::vector>; + +TORCH_CUDA_CPP_API std::uint64_t version(); +TORCH_CUDA_CPP_API const char* version_suffix(); + +bool is_available(at::TensorList tensors); + +TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id); +TORCH_CUDA_CPP_API ncclComm_t +comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank); +TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm); + +TORCH_CUDA_CPP_API void broadcast( + at::TensorList tensors, + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +size_t get_max_count(); + +TORCH_CUDA_CPP_API void reduce( + const std::vector& inputs, + at::Tensor& output, + int32_t root = 0, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void reduce( + std::vector& inputs, + int32_t root = 0, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void all_reduce( + const std::vector& inputs, + std::vector& outputs, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void reduce_scatter( + const std::vector& inputs, + std::vector& outputs, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, 
+ const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void scatter( + const std::vector& inputs, + at::Tensor& outputs, + ncclComm_t comm, + at::cuda::CUDAStream& stream, + int32_t root = 0); + +TORCH_CUDA_CPP_API void all_gather( + const std::vector& inputs, + std::vector& outputs, + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void gather( + const at::Tensor& inputs, + std::vector& outputs, + ncclComm_t comm, + at::cuda::CUDAStream& stream, + int32_t root = 0); + +TORCH_CUDA_CPP_API void all2all_single_equal_split( + at::Tensor& input, + at::Tensor& output, + int size, + ncclComm_t comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void all2all_single_unequal_split( + void* sendbuff, + const size_t* sendcounts, + const size_t* senddispls, + void* recvbuff, + const size_t* recvcounts, + const size_t* recvdispls, + size_t size, + c10::ScalarType type, + ncclComm_t comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void all2all( + std::vector& outputTensors, + std::vector& inputTensors, + ncclComm_t _comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void send( + const at::Tensor& input, + ncclComm_t comm, + at::cuda::CUDAStream stream, + int dst); + +TORCH_CUDA_CPP_API void recv( + at::Tensor& output, + ncclComm_t comm, + at::cuda::CUDAStream stream, + int src); +} // namespace torch::cuda::nccl diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h new file mode 100644 index 0000000000000000000000000000000000000000..e87ae053fbe7fc59d1713cf8b148dfe52cbd01dc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::cuda::python { + +void initCommMethods(PyObject* module); + +} // namespace torch::cuda::python diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h new file mode 100644 index 0000000000000000000000000000000000000000..ebaa666a22d2cff60e2ef2a2701003d0ca61a8e4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args); diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a0fa303c7c566d72a45306d4f063bd52accadd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h @@ -0,0 +1,128 @@ +#pragma once +#include +#include +#include +#include +#include + +// `TorchScript` offers a simple logging facility that can enabled by setting an +// environment variable `PYTORCH_JIT_LOG_LEVEL`. + +// Logging is enabled on a per file basis. To enable logging in +// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be +// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination` +// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`). 
+ +// Multiple files can be logged by separating each file name with a colon `:` as +// in the following example, +// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination` + +// There are 3 logging levels available for your use ordered by the detail level +// from lowest to highest. + +// * `GRAPH_DUMP` should be used for printing entire graphs after optimization +// passes +// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e. +// node deletion, constant folding, etc) +// * `GRAPH_DEBUG` should be used for providing information useful for debugging +// the internals of a particular optimization pass or analysis + +// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP` +// statements will be enabled when one specifies a file(s) in +// `PYTORCH_JIT_LOG_LEVEL`. + +// `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in +// `>alias_analysis`. +// `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in +// `>>alias_analysis`. +// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there +// is no logging level that is higher than `GRAPH_DEBUG`. + +namespace torch { +namespace jit { + +struct Node; +struct Graph; + +enum class JitLoggingLevels { + GRAPH_DUMP = 0, + GRAPH_UPDATE, + GRAPH_DEBUG, +}; + +TORCH_API std::string get_jit_logging_levels(); + +TORCH_API void set_jit_logging_levels(std::string level); + +TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream); + +TORCH_API std::ostream& get_jit_logging_output_stream(); + +TORCH_API std::string getHeader(const Node* node); + +TORCH_API std::string log_function(const std::shared_ptr& graph); + +TORCH_API ::torch::jit::JitLoggingLevels jit_log_level(); + +// Prefix every line in a multiline string \p IN_STR with \p PREFIX. 
+TORCH_API std::string jit_log_prefix( + const std::string& prefix, + const std::string& in_str); + +TORCH_API std::string jit_log_prefix( + ::torch::jit::JitLoggingLevels level, + const char* fn, + int l, + const std::string& in_str); + +TORCH_API bool is_enabled( + const char* cfname, + ::torch::jit::JitLoggingLevels level); + +TORCH_API std::ostream& operator<<( + std::ostream& out, + ::torch::jit::JitLoggingLevels level); + +#define JIT_LOG(level, ...) \ + if (is_enabled(__FILE__, level)) { \ + ::torch::jit::get_jit_logging_output_stream() \ + << ::torch::jit::jit_log_prefix( \ + level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \ + } + +// tries to reconstruct original python source +#define SOURCE_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \ + MSG, \ + "\n", \ + ::torch::jit::log_function(G)); +// use GRAPH_DUMP for dumping graphs after optimization passes +#define GRAPH_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString()); +// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion, +// constant folding, CSE) +#define GRAPH_UPDATE(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__); +// use GRAPH_DEBUG to provide information useful for debugging a particular opt +// pass +#define GRAPH_DEBUG(...) 
\ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__); +// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script +#define GRAPH_EXPORT(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \ + MSG, \ + "\n\n", \ + (G)->toString(), \ + ""); + +#define GRAPH_DUMP_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP)) +#define GRAPH_UPDATE_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE)) +#define GRAPH_DEBUG_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG)) +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h new file mode 100644 index 0000000000000000000000000000000000000000..a5bb535c9c6fe708bbbf51625182a725425f1dc8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h @@ -0,0 +1,39 @@ +#pragma once +#include +#include +#include + +// `TorchScript` offers a simple optimization limit checker +// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`. +// The purpose is to limit how many optimization you can make per pass. +// This is useful for debugging any passes. + +// Opt limit checker is enabled on a per file basis (hence per pass). For +// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set +// to `constant_propagation=` or, simply, to +// `constant_propagation=` where is the number of +// optimizations you want to make for the pass. (i.e. +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation="`). + +// Multiple files can be configured by separating each file name with a colon +// `:` as in the following example, +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=:dead_code_elimination="` + +// You can call opt limiter by calling JIT_OPT_ALLOWED. 
It will return true if +// we haven't reached the optimization limit yet. Otherwise, it will return +// false. Typical usage: + +// if (!JIT_OPT_ALLOWED) { +// GRAPH_DUMP(...); //supplied from jit_log +// return; +// } + +namespace torch { +namespace jit { + +TORCH_API bool opt_limit(const char* pass_name); + +#define JIT_OPT_ALLOWED opt_limit(__FILE__) + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h new file mode 100644 index 0000000000000000000000000000000000000000..c6b3f9376d6b328f1d7eaf6cda14b007a6fc8e45 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool AddIfThenElseOp(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h new file mode 100644 index 0000000000000000000000000000000000000000..46d90d1a515f66fa19ac37c6d8621ba5f6e687de --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API std::shared_ptr Canonicalize( + const std::shared_ptr& graph, + bool keep_unique_names = true); + +TORCH_API void CanonicalizeOutputs(std::shared_ptr& graph); + +TORCH_API c10::optional firstOrLastUse(Value* v, bool find_first); + +TORCH_API bool isBeforeOrAfter( + const Use& a, + const Use& b, + bool checking_before); + +} // namespace jit +} // namespace torch diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..87e86d8a7e4b2965a16422cd952418aef7e7db5e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/check_strict_fusion.h @@ -0,0 +1,12 @@ + +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void CheckStrictFusion(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h new file mode 100644 index 0000000000000000000000000000000000000000..30d02332429a81ec50c6dc3fae8ab7cddff88714 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/mkldnn_rewrite.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include + +#if AT_MKLDNN_ENABLED() + +#include + +#endif // AT_MKLDNN_ENABLED() + +namespace torch { +namespace jit { + +#if AT_MKLDNN_ENABLED() + +namespace mkldnn { + +const static std::map> + fusion_rewrite_map = { + {"none", {}}, + {"relu", {}}, +}; + +} // namespace mkldnn + +#endif // AT_MKLDNN_ENABLED() + +void FuseConvWithEltwise(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4d630392ca47df0d0a32ef1bf5d25bbb4a41c163 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace 
torch { +namespace jit { + +// This pass converts aten ops to a normalized form. It is +// run immediately after IR generation in both the tracer and compiler, +// so downstream consumers of the IR do not need handle ops in their +// pre-normalized form. +// Currently only handles normalization of op aliases. +TORCH_API void NormalizeOps(const std::shared_ptr& graph); + +const std::unordered_map& getOperatorAliasMap(); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..8585c6ecdb3de5c0ce1a3a72be9d722d2b423f0d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/pass_manager.h @@ -0,0 +1,136 @@ +#pragma once + +#include + +/* `getCustomPrePasses()` returns a vector of passes that will be executed + * after differentiation but before any fusion. This is the de-facto location + * for compiler backends to insert passes. + * + * `getCustomPostPasses()` returns a vector of passes that will be + * executed after differentiation and after fusion (if any). This is the + * location for fusion cleanup passes if they are needed. + * + * Static registration of a pass can be done by creating a global + * `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit. + * + * pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which + * modify the IR graph in place. + */ + +namespace torch { +namespace jit { + +// A pass modifies a Graph in place. +using GraphPass = std::function&)>; + +// Since Passes are std::functions, we associate a UUID to each pass, this way +// if we want to deregister a pass, we have something to reference it by. 
+using GraphPassNameType = unsigned int; + +// Graph pass entries have a name associated with them +using GraphPassEntry = std::pair; + +// Return currently registered passes. Passes are stored in a static vector +TORCH_API std::vector>& +getCustomPostPasses(); +TORCH_API std::vector>& +getCustomPrePasses(); + +TORCH_API GraphPassNameType registerPostPass(GraphPass p); +TORCH_API GraphPassNameType registerPrePass(GraphPass p); + +// Look up pass by name passed in, remove it from registered passes +TORCH_API void clearPostPass(GraphPassNameType p); +TORCH_API void clearPrePass(GraphPassNameType p); + +// Remove all passes +TORCH_API void clearAllPostPasses(); +TORCH_API void clearAllPrePasses(); + +// LEGACY CALL +struct TORCH_API RegisterPostPass { + RegisterPostPass(GraphPass p); +}; + +using RegisterPass = RegisterPostPass; + +/* + * PassManager is a wrapper on the register/clear PostPass functions above. It + * will register the pass provided in "registerPass" and will hold on to its + * associated name that way clearPass can be later called and will delete the + * pass used to register when called. + * + * PassManager is templated because we want static variables based on a + * particular GraphPass. When deriving from PassManager, you should send as the + * template parameter your derived class as you would for the curiously + * recurring template pattern. This template parameter isn't actually used and + * is simply done to prevent static members from being shared across derived + * types. 
+ */ +template +struct C10_EXPORT PassManager { + private: + // We want this class to be abstract because it's + virtual void abstract() = 0; + + protected: + /* + * isRegistered() will return if a pass has been registered + * isRegistered(true) will change the value of the internal static bool + * + * There's an internal static bool to this function to keep track of the + * state, this is so when functions are derived from this class, they don't + * have to worry about initializing the static members. + */ + static bool isRegistered(bool flip_bit = false) { + static bool val = false; + if (flip_bit) + val = !val; + return val; + } + + /* + * name() will return the name of the registered pass + * name(pass_name, true) will set the name of the pass + * Similarly to isRegistered we use an internal static variable to hold the + * name. + */ + static GraphPassNameType passID( + GraphPassNameType PassID = 0, + bool set = false) { + static GraphPassNameType pass_id = 0; + if (set) + pass_id = PassID; + return pass_id; + } + + public: + // registerPass(pass) will register the pass provided and set the + // name/isRegistered functions appropriately, it returns a bool value + // indicating whether the given pass is already registered previously. + static bool registerPass(GraphPass p) { + if (!isRegistered()) { + // If we don't already have a registered pass, register pass + // hold on to its name, change isRegistered to true + passID(registerPostPass(std::move(p)), true); + isRegistered(true); + return false; + } + return true; + } + + // Calls ClearPostPass(passID()) + static void clearPass() { + // If the pass is registered, clear it and change isRegistered to false. 
+ if (isRegistered()) { + clearPostPass(passID()); + isRegistered(true); + } + } + + // clang-tidy requires virtual destructor; + virtual ~PassManager() = default; +}; + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h new file mode 100644 index 0000000000000000000000000000000000000000..75f36313b3a1510dec9ec8107b1e5d1c2a781c49 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// updates the types of tuples according to the type of their current inputs. +TORCH_API void RefineTupleTypes(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h new file mode 100644 index 0000000000000000000000000000000000000000..8a484a839e552dc02a4a04e660d9d173e473f2c5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_expands.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API void RemoveExpands(const std::shared_ptr& graph); + +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h new file mode 100644 index 0000000000000000000000000000000000000000..1f3fbf6cac88d082a1a55ae0b9b85667f8d83561 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/replacement_of_old_operators.h @@ -0,0 +1,16 @@ 
+#pragma once + +#include + +namespace torch { +namespace jit { + +// Find the valid upgrader graph for the upgrader and cache the result +// for later lookups. Will error out if there is no valid upgrader graph +// provided for the upgrader name. +std::shared_ptr getUpgraderGraph(const std::string& upgrader_name); + +TORCH_API void ReplaceOldOperatorsWithUpgraders(std::shared_ptr graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd89bbcc22fffffef677a570923d469a6f129bd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/variadic_ops.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Try to replace an op that takes a list input with another op that takes a +// variadic number of arguments. +TORCH_API bool UseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +TORCH_API bool RemoveListMutationAndUseVariadicOp( + const std::shared_ptr& graph, + NodeKind op, + NodeKind variadic_op); + +// Convenient functions for replacing aten::stack/aten::cat with their +// variadic versions. 
+TORCH_API bool UseVariadicCat(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicCat( + const std::shared_ptr& graph); + +TORCH_API bool UseVariadicStack(const std::shared_ptr& graph); +TORCH_API bool RemoveListMutationAndUseVariadicStack( + const std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..48c689b6cbfbd7bfb29e352b142e7b7baa8b1484 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace torch { +namespace jit { + +class ResourceGuard { + std::function _destructor; + bool _released; + + public: + ResourceGuard(std::function destructor) + : _destructor(std::move(destructor)), _released(false) {} + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~ResourceGuard() { + if (!_released) + _destructor(); + } + + void release() { + _released = true; + } +}; + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h new file mode 100644 index 0000000000000000000000000000000000000000..3a56cfc7788fb81a55ddbad966f44afd9362c73b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export.h @@ -0,0 +1,280 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ONNX_NAMESPACE { +class ModelProto; +} + +namespace torch { +namespace jit { + +// This map is used to keep track of parameters that should be exported +// externally. 
When `defer_weight_export` is true, the returned map contains +// kv pairs that map {external reference name} -> {at::Tensor to be exported}. +// It is the responsibility of the caller to export these appropriately. +// +// For example, when exporting to a zip archive, the caller may write out files +// for each entry in the export map, with the filename being the key and the +// file contents being the raw tensor data. +using RawDataExportMap = std::unordered_map; + +using SymbolDimMap = std::map; + +using NodeNameMap = std::unordered_map; + +// Used for modularized export settling function and node attributes. +using NodeAttrNameMap = std:: + unordered_map>; + +TORCH_API std::tuple< + std::shared_ptr<::ONNX_NAMESPACE::ModelProto>, + RawDataExportMap, + SymbolDimMap, + bool, + NodeNameMap> +export_onnx( + const std::shared_ptr& graph, + const std::map& initializers, + int64_t onnx_opset_version, + const std::unordered_map< + std::string, + std::unordered_map>& dynamic_axes, + bool defer_weight_export = false, + ::torch::onnx::OperatorExportTypes operator_export_type = + ::torch::onnx::OperatorExportTypes::ONNX, + bool strip_doc_string = true, + bool keep_initializers_as_inputs = true, + const std::map& custom_opsets = {}, + bool add_node_names = true, + bool use_external_data_format = false, + const std::string& onnx_file_path = std::string(), + const NodeAttrNameMap& node_attr_to_name = {}); + +TORCH_API std::string serialize_model_proto_to_string( + const std::shared_ptr<::ONNX_NAMESPACE::ModelProto>& model_proto); + +TORCH_API void check_onnx_proto(const std::string& proto_string); + +// Serializer for both oldsyle and unified format TorchScript serialization +class TORCH_API ScriptModuleSerializer { + public: + explicit ScriptModuleSerializer( + caffe2::serialize::PyTorchStreamWriter& export_writer) + : writer_(export_writer), current_source_range_tag_(0) {} + + void writeFiles(const std::string& code_dir); + void serialize( + const Module& module, + const 
ExtraFilesMap& extra_files, + bool bytecode_format, + bool save_mobile_debug_info); + void serialize_unified_format(Module& module, uint64_t script_module_id); + SerializationStorageContext& storage_context(); + + ~ScriptModuleSerializer() = default; + + private: + void convertNamedType(const c10::NamedTypePtr& class_type); + void convertTypes(const at::NamedTypePtr& root_type); + void writeExtraFiles(const Module& module, const ExtraFilesMap& extra_files); + void writeByteCode(const Module& module, bool save_mobile_debug_info); + void writeArchive( + const IValue& value, + const std::string& archive_name, + const std::string& archive_dir, + const std::string& tensor_dir, + bool use_storage_context = false, + bool skip_tensor_data = false); + void updateSourceRangeTags(const SourceRangeRecords& ranges); + + caffe2::serialize::PyTorchStreamWriter& writer_; + std::vector constant_table_; + + std::unordered_set converted_types_; + PrintDepsTable class_deps_; + TypeNameUniquer type_name_uniquer_; + // qualifier, e.g. '__torch__.Bar' -> PythonPrint for the file that will be + // created + OrderedDict file_streams_; + // Used to keep references of storages around during serialization to solve + // for ABA memory reuse problem hit when storages are created/destroyed + // during serialization process. Also used to coordinate sharing of storages + // between Script and eager modules in torch.package. + SerializationStorageContext storage_context_; + + // Uniquely identifies a SourceRange in a model. + // SourceRanges are associated with Nodes of Graphs. + // However for mobile deployment we dont intend to ship + // full JIT with capabilities of reading code and constructing + // graphs. + // Instead we serialize the Code generated from graph of the methods. + // Code is serialized in bytecode format that contains instructions + // corresponding to the nodes of the graph. 
Since original graph is gone, the + // question is how do we identify where the ops, in serialized bytecode, come + // from in original model code. We do this in two parts. + // 1. Associate a unique tag to SourceRange. + // 2. Serialize this unique_tag. + // 2.1 Meaning save instead of + // + // 3. During serializing model for mobile, i.e. bytecode generation, + // save unique tag of SourceRange corresponding to the Node. + // 4. During deserialization, read all the debug_pkl, to construct a map + // of and use tag saved with OPs in bytecode + // to lookup the source range. + // Strictly speaking we will serialize InlinedCallStack directly, which + // contains SourceRange. This way we have access to entire callstack and not + // just source information about where the node is, since bytecode inlines the + // graph before saving it. + SourceRangeTagMap source_range_tags_; + int64_t current_source_range_tag_; +}; + +// For testing purposes +TORCH_API std::string pretty_print_onnx( + const std::shared_ptr& graph, + const std::map& initializers, + int64_t onnx_opset_version, + bool defer_weight_export, + ::torch::onnx::OperatorExportTypes operator_export_type = + ::torch::onnx::OperatorExportTypes::ONNX, + bool google_printer = false, + bool keep_initializers_as_inputs = true, + const std::map& custom_opsets = {}, + bool add_node_names = true); + +TORCH_API void ExportModule( + const Module& module, + std::ostream& out, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +TORCH_API void ExportModule( + const Module& module, + const std::string& filename, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +TORCH_API void ExportModule( + const Module& module, + const std::function& writer_func, + const ExtraFilesMap& metadata = ExtraFilesMap(), + bool 
bytecode_format = false, + bool save_mobile_debug_info = false, + bool use_flatbuffer = false); + +// Write the bytes of a pickle archive and the tensors referenced inside that +// archive +TORCH_API void writeArchiveAndTensors( + const std::string& archive_name, + const char* pickle_bytes, + size_t size, + const std::vector& tensors, + caffe2::serialize::PyTorchStreamWriter& out); + +// Surrounding system can install an additional hook to produce extra files +// with metadata based on environment every time a module is serialized. +using ExportModuleExtraFilesHook = std::function; +TORCH_API void SetExportModuleExtraFilesHook(ExportModuleExtraFilesHook hook); + +/** + * Generates new bytecode for a Script module and returns what the op list + * would be for a LiteScriptModule based off the current code base. If you + * have a LiteScriptModule and want to get the currently present + * list of ops call _export_operator_list instead. + */ +TORCH_API std::vector export_opnames(const Module& m); + +struct TORCH_API BytecodeEmitMode { + static bool is_default_value_for_unspecified_arg_enabled(); + static void set_default_value_for_unspecified_arg_enabled(bool enabled); + + static bool is_default_args_before_out_args_enabled(); + static void set_default_args_before_out_args_enabled(bool enabled); + + static bool is_emit_promoted_ops_enabled(); + static void set_default_emit_promoted_ops_enabled(bool enabled); +}; + +// RAII guard to switch the way JIT emits the bytecode for inputs. +// default_value_for_unspecified_arg: +// true: instruction of default argument values (like LOADC) is emitted. +// false: instruction of default argument values are not emitted. Instead +// they are fetched from operator schema. +// default_args_before_out_args (to forward compatibile support +// operators allowing out arguments and default arguments): +// true: the number of specified arguments will deserialized to (#all_args - +// #default_args). 
false: the number of specified arguments will deserialized to +// (#all_args). +struct TORCH_API BytecodeEmitModeGuard { + BytecodeEmitModeGuard( + bool enable_default_value_for_unspecified_arg, + bool enable_default_args_before_out_args, + bool enable_emit_promoted_ops) + : prev_default_value_for_unspecified_arg_mode( + BytecodeEmitMode::is_default_value_for_unspecified_arg_enabled()), + prev_default_args_before_out_args( + BytecodeEmitMode::is_default_args_before_out_args_enabled()), + prev_default_emit_promoted_ops( + BytecodeEmitMode::is_emit_promoted_ops_enabled()) { + BytecodeEmitMode::set_default_value_for_unspecified_arg_enabled( + enable_default_value_for_unspecified_arg); + BytecodeEmitMode::set_default_args_before_out_args_enabled( + enable_default_args_before_out_args); + BytecodeEmitMode::set_default_emit_promoted_ops_enabled( + enable_emit_promoted_ops); + } + ~BytecodeEmitModeGuard() { + BytecodeEmitMode::set_default_value_for_unspecified_arg_enabled( + prev_default_value_for_unspecified_arg_mode); + BytecodeEmitMode::set_default_args_before_out_args_enabled( + prev_default_args_before_out_args); + BytecodeEmitMode::set_default_emit_promoted_ops_enabled( + prev_default_emit_promoted_ops); + } + bool prev_default_value_for_unspecified_arg_mode; + bool prev_default_args_before_out_args; + bool prev_default_emit_promoted_ops; +}; + +TORCH_API IValue to_tuple(std::vector ivalues); +TORCH_API IValue +Table(const std::vector>& entries); + +// TODO remove these switches once interface call is rolled out. 
+TORCH_API void enableMobileInterfaceCallExport(); +bool getMobileInterfaceCallExport(); + +TORCH_API CompilationOptions getOptionsFromGlobal(); + +TORCH_API void save_jit_module( + const Module& module, + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap()); + +TORCH_API DetachedBuffer::UniqueDetachedBuffer save_jit_module_to_bytes( + const Module& module, + const ExtraFilesMap& extra_files = ExtraFilesMap()); + +TORCH_API void save_jit_module_to_write_func( + const Module& module, + const ExtraFilesMap& extra_files, + bool save_mobile_debug_info, + const std::function& writer_func); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h new file mode 100644 index 0000000000000000000000000000000000000000..96397a56eac8617b94d6562f38406d5b8a9136a4 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API CompilationOptions { + bool incl_interface_call = false; + bool enable_default_value_for_unspecified_arg = false; + bool enable_default_args_before_out_args = true; + bool enable_emit_promoted_ops = true; + int model_version = caffe2::serialize::kProducedBytecodeVersion; +}; + +TORCH_API mobile::Module jitModuleToMobile( + const Module& module, + const CompilationOptions& options); + +mobile::Code compileGraphToMobileCode( + const std::string& name, + const std::shared_ptr& graph, + const CompilationOptions& compilation_options, + BackendDebugInfoRecorder& debug_info_recorder); + +TORCH_API std::unique_ptr convertJitFunctionToMobileFunction( + const GraphFunction& 
function, + const CompilationOptions& options); + +TORCH_API IValue convertMobileFunctionToCodeTable( + const mobile::Function& func, + const CompilationOptions& compilation_options); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h new file mode 100644 index 0000000000000000000000000000000000000000..43e8062ef2dce517ae252fe5f8569a0084121e17 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * Defines the public API for serializing mobile modules to flatbuffer. + * Note that this header must not include or depend on flatbuffer-defined + * types, to avoid leaking those details to PyTorch clients. + */ + +namespace torch { +namespace jit { + +/// Maps file names to file contents. +using ExtraFilesMap = std::unordered_map; + +/** + * Represents a span of data. Typically owned by a UniqueDetachedBuffer. + */ +class TORCH_API DetachedBuffer final { + public: + /// Creates a new DetachedBuffer with an optional data owner. This interface + /// is provided to let users create objects of this type for testing. + DetachedBuffer(void* data, size_t size, void* internal_data_owner = nullptr) + : data_(data), size_(size), data_owner_(internal_data_owner) {} + + /// Returns a pointer to the data. + C10_NODISCARD void* data() { + return data_; + } + /// Returns a pointer to the data. + C10_NODISCARD const void* data() const { + return data_; + } + /// Returns the size of the data, in bytes. + C10_NODISCARD size_t size() const { + return size_; + } + + /// Wrapper type that typically owns data_owner_. 
+ using UniqueDetachedBuffer = + std::unique_ptr>; + + private: + /// Deletes the owner, if present, and the buf itself. + /// Note: we could have provided a movable type with a destructor that did + /// this work, but the unique wrapper was easier in practice. + static void destroy(DetachedBuffer* buf); + + /// Provides access to destroy() for implementation and testing. + friend struct DetachedBufferFriend; + friend struct DetachedBufferTestingFriend; + + /// Pointer to the data. Not owned by this class. + void* data_; + /// The size of `data_`, in bytes. + size_t size_; + /// Opaque pointer to the underlying owner of `data_`. This class + /// (DetachedBuffer) does not own the owner or the data. It will typically be + /// owned by a UniqueDetachedBuffer that knows how to delete the owner along + /// with this class. + void* data_owner_; +}; + +TORCH_API void save_mobile_module( + const mobile::Module& module, + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + const ExtraFilesMap& jit_sources = ExtraFilesMap(), + const std::vector& jit_constants = {}); + +TORCH_API DetachedBuffer::UniqueDetachedBuffer save_mobile_module_to_bytes( + const mobile::Module& module, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + const ExtraFilesMap& jit_sources = ExtraFilesMap(), + const std::vector& jit_constants = {}); + +TORCH_API void save_mobile_module_to_func( + const mobile::Module& module, + const std::function& writer_func); + +// TODO(qihan): delete +TORCH_API bool register_flatbuffer_serializer(); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h new file mode 100644 index 0000000000000000000000000000000000000000..b43ab831f17735ae262b36b724bed5215c492f7c --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer_jit.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API bool register_flatbuffer_all(); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h new file mode 100644 index 0000000000000000000000000000000000000000..c8379f38810f70990969ca6b58afdb58748db87e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import.h @@ -0,0 +1,157 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace caffe2 { +namespace serialize { +class ReadAdapterInterface; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +class DeserializationStorageContext; + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + const std::string& filename, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::istream& in, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::unique_ptr rai, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + const std::string& filename, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true, + bool restore_shapes = false); + +// For reading unified serialization format from torch.Package +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::shared_ptr reader, + std::shared_ptr storage_context, + c10::optional device, + std::string ts_id /* torchscript identifier inside package */); + +TORCH_API Module import_ir_module( + 
std::shared_ptr cu, + std::istream& in, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true, + bool restore_shapes = false); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::unique_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +TORCH_API Module import_ir_module( + std::shared_ptr cu, + std::shared_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given `istream`. +/// +/// The istream must contain a serialized `Module`, exported via +/// `torch::jit::ExportModule` in C++. +TORCH_API Module load( + std::istream& in, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + std::istream& in, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given `filename`. +/// +/// The file stored at the location given in `filename` must contain a +/// serialized `Module`, exported either via `ScriptModule.save()` in +/// Python or `torch::jit::ExportModule` in C++. +TORCH_API Module load( + const std::string& filename, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + const std::string& filename, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +/// Loads a serialized `Module` from the given shared_ptr `rai`. +/// +/// The reader adapter, which is for customized input stream, must contain a +/// serialized `Module`, exported either via `ScriptModule.save()` in +/// Python or `torch::jit::ExportModule` in C++. 
+TORCH_API Module load( + std::shared_ptr rai, + c10::optional device = c10::nullopt, + bool load_debug_files = true); + +TORCH_API Module load( + std::shared_ptr rai, + c10::optional device, + ExtraFilesMap& extra_files, + bool load_debug_files = true); + +TORCH_API Module jitModuleFromSourceAndConstants( + const IValue& ivalue, + const ExtraFilesMap& source, + const std::vector& constants, + int32_t version); + +TORCH_API Module parse_and_initialize_jit_module( + std::shared_ptr data, + size_t size, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module load_jit_module_from_file( + const std::string& filename, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module load_jit_module_from_stream( + std::istream& in, + ExtraFilesMap& extra_files, + c10::optional device = c10::nullopt); + +TORCH_API Module parse_and_initialize_jit_module( + std::shared_ptr data, + size_t size, + ExtraFilesMap& extra_files, + c10::optional device); + +TORCH_API c10::intrusive_ptr ObjLoaderFunc( + const at::StrongTypePtr& type, + IValue input); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..704d7239f2b46cceda6eb07c1cb3c1864bf8bc95 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h @@ -0,0 +1,21 @@ +#pragma once +#include + +namespace torch { +namespace jit { +constexpr size_t BYTECODE_INDEX_INSTRUCTION = 0; +constexpr size_t BYTECODE_INDEX_OPERATOR = 1; +constexpr size_t BYTECODE_INDEX_CONSTANT = 2; +constexpr size_t BYTECODE_INDEX_TYPE = 3; +constexpr size_t BYTECODE_INDEX_REGISTER_SIZE = 4; + +constexpr size_t BYTECODE_INDEX_SCHEMA_ARGUMENTS = 
0; +constexpr size_t BYTECODE_INDEX_SCHEMA_RETURNS = 1; + +constexpr size_t BYTECODE_INDEX_ARGUMENT_NAME = 0; +constexpr size_t BYTECODE_INDEX_ARGUMENT_TYPE = 1; +constexpr size_t BYTECODE_INDEX_ARGUMENT_DEFAULT_VALUE = 2; + +constexpr size_t BYTECODE_INDEX_MODULE_DEBUG_HANDLES = 0; +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0de156bfb753c4aa78c7a0f62d9a167acb205ab9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_functions.h @@ -0,0 +1,16 @@ +#pragma once +#include + +// Functions that are used in both import and export processes +namespace torch { +namespace jit { +using c10::IValue; +IValue expect_field( + c10::ivalue::TupleElements& elements, + const std::string& expected_name, + size_t entry); +std::string operator_str( + const std::string& name, + const std::string& overloadname); +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..c94993207e6a3cd0c9277496ee4dbe2ee547b345 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} +} // namespace caffe2 + +namespace torch { +namespace jit { + +struct Source; + +// Convert a class type's qualifier name to the corresponding path the source +// file it should be written 
to. +// +// Qualifier is like: foo.bar.baz +// Returns: libs/foo/bar/baz.py +std::string qualifierToArchivePath( + const std::string& qualifier, + const std::string& export_prefix); + +std::shared_ptr findSourceInArchiveFromQualifier( + caffe2::serialize::PyTorchStreamReader& reader, + const std::string& export_prefix, + const std::string& qualifier); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..a2618281095969011b9ee4483794c7e6ce1422b2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +struct CompilationUnit; + +// Deserializes a model in legacy format. 
+Module LEGACY_deserialize( + std::shared_ptr cu, + std::shared_ptr reader, + const c10::optional& device); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h new file mode 100644 index 0000000000000000000000000000000000000000..ab89f93880c34f4dfda499e46852c824af953b37 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +TORCH_API IValue readArchiveAndTensors( + const std::string& archive_name, + const std::string& pickle_prefix, + const std::string& tensor_prefix, + c10::optional type_resolver, + c10::optional obj_loader, + c10::optional device, + caffe2::serialize::PyTorchStreamReader& stream_reader, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser, + std::shared_ptr storage_context = nullptr); + +bool check_zip_file( + std::shared_ptr rai); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h new file mode 100644 index 0000000000000000000000000000000000000000..9a720a81bcbb2085a4f3f21ff5e73d45d9afaf1b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_source.h @@ -0,0 +1,103 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + 
+using SourceLoader = std::function(const std::string&)>; + +struct SourceImporterImpl : public Resolver, + std::enable_shared_from_this { + SourceImporterImpl( + std::shared_ptr cu, + const std::vector* constant_table, + SourceLoader source_loader, + size_t version); + TypePtr findNamedType(const QualifiedName& name); + Function* findFunction(const QualifiedName& name); + void parseSourceIfNeeded(const std::string& qualifier); + void LEGACY_import_methods( + const Module& mod, + const std::shared_ptr& src); + + std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) override; + TypePtr resolveType(const std::string& name, const SourceRange& loc) override; + + private: + void importFunction(const std::string& qualifier, const Def& def); + void importNamedType(const std::string& qualifier, const ClassDef& class_def); + c10::optional attributeAssignmentSpecialHandlingHack( + const QualifiedName& qualified_classname, + const Assign& assign); + void importClass( + const QualifiedName& qualified_classname, + const ClassDef& class_def, + bool is_module); + void importEnum( + const QualifiedName& qualified_name, + const ClassDef& enum_def); + void importNamedTuple( + const QualifiedName& qualified_name, + const ClassDef& named_tuple_def); + + void parsePossibleVersionNumber(Lexer& L); + + void parseImports(Lexer& L); + + std::shared_ptr cu_; + std::unordered_map> env_; + SourceLoader source_loader_; + c10::optional version_ = c10::nullopt; + std::unordered_set loaded_sources_; + // named types and functions loaded from a file but not yet defined because + // their type has not been requested yet. + std::unordered_map to_be_defined_; +}; + +// Given a directory of serialized TorchScript sources, +// This class allows the loading of individual named types in source. +// Resolves the dependencies between source files and parses +// the source files as necessary. 
+ +struct TORCH_API SourceImporter { + SourceImporter( + // The compilation unit that will own the imported source + std::shared_ptr cu, + const std::vector* constant_table, + SourceLoader loader, + size_t version); + + TypePtr loadType(const QualifiedName& name) const; + + // Add the methods defined in `src` to the module `mod`, using SourceImporter + // to resolve any classes via loadType + void LEGACY_import_methods( + const Module& mod, + const std::shared_ptr& src); + ~SourceImporter(); + + private: + std::shared_ptr pImpl; +}; + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h new file mode 100644 index 0000000000000000000000000000000000000000..cffe8bc7a636457ea3452fba9c040a71cb71fa5e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/mobile_bytecode_generated.h @@ -0,0 +1,2599 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ +#define FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ + +#include "flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. 
+static_assert(FLATBUFFERS_VERSION_MAJOR == 23 && + FLATBUFFERS_VERSION_MINOR == 3 && + FLATBUFFERS_VERSION_REVISION == 3, + "Non-compatible flatbuffers version included"); + +namespace torch { +namespace jit { +namespace mobile { +namespace serialization { + +struct Int; + +struct Bool; + +struct Double; + +struct PerTensorAffineSchema; + +struct QuantizedSchema; +struct QuantizedSchemaBuilder; + +struct TensorMetadata; +struct TensorMetadataBuilder; + +struct String; +struct StringBuilder; + +struct Device; +struct DeviceBuilder; + +struct List; +struct ListBuilder; + +struct IntList; +struct IntListBuilder; + +struct DoubleList; +struct DoubleListBuilder; + +struct BoolList; +struct BoolListBuilder; + +struct Tuple; +struct TupleBuilder; + +struct Dict; +struct DictBuilder; + +struct ObjectType; +struct ObjectTypeBuilder; + +struct Object; +struct ObjectBuilder; + +struct ComplexDouble; + +struct EnumValue; +struct EnumValueBuilder; + +struct Instruction; + +struct Operator; +struct OperatorBuilder; + +struct Arg; +struct ArgBuilder; + +struct Schema; +struct SchemaBuilder; + +struct DebugInfo; +struct DebugInfoBuilder; + +struct Function; +struct FunctionBuilder; + +struct StorageData; +struct StorageDataBuilder; + +struct IValue; +struct IValueBuilder; + +struct ExtraFile; +struct ExtraFileBuilder; + +struct Module; +struct ModuleBuilder; + +enum class TypeType : uint8_t { + UNSET = 0, + CLASS_WITH_FIELD = 1, + CUSTOM_CLASS = 2, + CLASS_WITH_SETSTATE = 3, + NON_OBJ = 4, + MIN = UNSET, + MAX = NON_OBJ +}; + +inline const TypeType (&EnumValuesTypeType())[5] { + static const TypeType values[] = { + TypeType::UNSET, + TypeType::CLASS_WITH_FIELD, + TypeType::CUSTOM_CLASS, + TypeType::CLASS_WITH_SETSTATE, + TypeType::NON_OBJ + }; + return values; +} + +inline const char * const *EnumNamesTypeType() { + static const char * const names[6] = { + "UNSET", + "CLASS_WITH_FIELD", + "CUSTOM_CLASS", + "CLASS_WITH_SETSTATE", + "NON_OBJ", + nullptr + }; + return names; +} + 
+inline const char *EnumNameTypeType(TypeType e) { + if (::flatbuffers::IsOutRange(e, TypeType::UNSET, TypeType::NON_OBJ)) return ""; + const size_t index = static_cast(e); + return EnumNamesTypeType()[index]; +} + +enum class IValueUnion : uint8_t { + NONE = 0, + Int = 1, + Bool = 2, + Double = 3, + ComplexDouble = 4, + TensorMetadata = 5, + String = 6, + List = 7, + Tuple = 8, + Dict = 9, + Object = 10, + IntList = 11, + DoubleList = 12, + BoolList = 13, + Device = 14, + EnumValue = 15, + Function = 16, + MIN = NONE, + MAX = Function +}; + +inline const IValueUnion (&EnumValuesIValueUnion())[17] { + static const IValueUnion values[] = { + IValueUnion::NONE, + IValueUnion::Int, + IValueUnion::Bool, + IValueUnion::Double, + IValueUnion::ComplexDouble, + IValueUnion::TensorMetadata, + IValueUnion::String, + IValueUnion::List, + IValueUnion::Tuple, + IValueUnion::Dict, + IValueUnion::Object, + IValueUnion::IntList, + IValueUnion::DoubleList, + IValueUnion::BoolList, + IValueUnion::Device, + IValueUnion::EnumValue, + IValueUnion::Function + }; + return values; +} + +inline const char * const *EnumNamesIValueUnion() { + static const char * const names[18] = { + "NONE", + "Int", + "Bool", + "Double", + "ComplexDouble", + "TensorMetadata", + "String", + "List", + "Tuple", + "Dict", + "Object", + "IntList", + "DoubleList", + "BoolList", + "Device", + "EnumValue", + "Function", + nullptr + }; + return names; +} + +inline const char *EnumNameIValueUnion(IValueUnion e) { + if (::flatbuffers::IsOutRange(e, IValueUnion::NONE, IValueUnion::Function)) return ""; + const size_t index = static_cast(e); + return EnumNamesIValueUnion()[index]; +} + +template struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::NONE; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Int; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Bool; +}; + +template<> struct 
IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Double; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::ComplexDouble; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::TensorMetadata; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::String; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::List; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Tuple; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Dict; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Object; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::IntList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::DoubleList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::BoolList; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Device; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::EnumValue; +}; + +template<> struct IValueUnionTraits { + static const IValueUnion enum_value = IValueUnion::Function; +}; + +bool VerifyIValueUnion(::flatbuffers::Verifier &verifier, const void *obj, IValueUnion type); +bool VerifyIValueUnionVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Int FLATBUFFERS_FINAL_CLASS { + private: + int64_t int_val_; + + public: + Int() + : int_val_(0) { + } + Int(int64_t _int_val) + : int_val_(::flatbuffers::EndianScalar(_int_val)) { + } + int64_t int_val() const { 
+ return ::flatbuffers::EndianScalar(int_val_); + } + void mutate_int_val(int64_t _int_val) { + ::flatbuffers::WriteScalar(&int_val_, _int_val); + } +}; +FLATBUFFERS_STRUCT_END(Int, 8); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(1) Bool FLATBUFFERS_FINAL_CLASS { + private: + uint8_t bool_val_; + + public: + Bool() + : bool_val_(0) { + } + Bool(bool _bool_val) + : bool_val_(::flatbuffers::EndianScalar(static_cast(_bool_val))) { + } + bool bool_val() const { + return ::flatbuffers::EndianScalar(bool_val_) != 0; + } + void mutate_bool_val(bool _bool_val) { + ::flatbuffers::WriteScalar(&bool_val_, static_cast(_bool_val)); + } +}; +FLATBUFFERS_STRUCT_END(Bool, 1); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) Double FLATBUFFERS_FINAL_CLASS { + private: + double double_val_; + + public: + Double() + : double_val_(0) { + } + Double(double _double_val) + : double_val_(::flatbuffers::EndianScalar(_double_val)) { + } + double double_val() const { + return ::flatbuffers::EndianScalar(double_val_); + } + void mutate_double_val(double _double_val) { + ::flatbuffers::WriteScalar(&double_val_, _double_val); + } +}; +FLATBUFFERS_STRUCT_END(Double, 8); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) PerTensorAffineSchema FLATBUFFERS_FINAL_CLASS { + private: + double q_scale_; + int32_t q_zero_point_; + int32_t padding0__; + + public: + PerTensorAffineSchema() + : q_scale_(0), + q_zero_point_(0), + padding0__(0) { + (void)padding0__; + } + PerTensorAffineSchema(double _q_scale, int32_t _q_zero_point) + : q_scale_(::flatbuffers::EndianScalar(_q_scale)), + q_zero_point_(::flatbuffers::EndianScalar(_q_zero_point)), + padding0__(0) { + (void)padding0__; + } + double q_scale() const { + return ::flatbuffers::EndianScalar(q_scale_); + } + void mutate_q_scale(double _q_scale) { + ::flatbuffers::WriteScalar(&q_scale_, _q_scale); + } + int32_t q_zero_point() const { + return ::flatbuffers::EndianScalar(q_zero_point_); + } + void mutate_q_zero_point(int32_t _q_zero_point) { + 
::flatbuffers::WriteScalar(&q_zero_point_, _q_zero_point); + } +}; +FLATBUFFERS_STRUCT_END(PerTensorAffineSchema, 16); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) ComplexDouble FLATBUFFERS_FINAL_CLASS { + private: + double real_; + double imag_; + + public: + ComplexDouble() + : real_(0), + imag_(0) { + } + ComplexDouble(double _real, double _imag) + : real_(::flatbuffers::EndianScalar(_real)), + imag_(::flatbuffers::EndianScalar(_imag)) { + } + double real() const { + return ::flatbuffers::EndianScalar(real_); + } + void mutate_real(double _real) { + ::flatbuffers::WriteScalar(&real_, _real); + } + double imag() const { + return ::flatbuffers::EndianScalar(imag_); + } + void mutate_imag(double _imag) { + ::flatbuffers::WriteScalar(&imag_, _imag); + } +}; +FLATBUFFERS_STRUCT_END(ComplexDouble, 16); + +FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Instruction FLATBUFFERS_FINAL_CLASS { + private: + int8_t op_; + int8_t padding0__; + uint16_t n_; + int32_t x_; + + public: + Instruction() + : op_(0), + padding0__(0), + n_(0), + x_(0) { + (void)padding0__; + } + Instruction(int8_t _op, uint16_t _n, int32_t _x) + : op_(::flatbuffers::EndianScalar(_op)), + padding0__(0), + n_(::flatbuffers::EndianScalar(_n)), + x_(::flatbuffers::EndianScalar(_x)) { + (void)padding0__; + } + int8_t op() const { + return ::flatbuffers::EndianScalar(op_); + } + void mutate_op(int8_t _op) { + ::flatbuffers::WriteScalar(&op_, _op); + } + uint16_t n() const { + return ::flatbuffers::EndianScalar(n_); + } + void mutate_n(uint16_t _n) { + ::flatbuffers::WriteScalar(&n_, _n); + } + int32_t x() const { + return ::flatbuffers::EndianScalar(x_); + } + void mutate_x(int32_t _x) { + ::flatbuffers::WriteScalar(&x_, _x); + } +}; +FLATBUFFERS_STRUCT_END(Instruction, 8); + +struct QuantizedSchema FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef QuantizedSchemaBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_QSCHEME = 4, + VT_SCALE = 6, + VT_ZERO_POINT = 8, 
+ VT_SCALES = 10, + VT_ZERO_POINTS = 12, + VT_AXIS = 14 + }; + int8_t qscheme() const { + return GetField(VT_QSCHEME, 0); + } + bool mutate_qscheme(int8_t _qscheme = 0) { + return SetField(VT_QSCHEME, _qscheme, 0); + } + double scale() const { + return GetField(VT_SCALE, 0.0); + } + bool mutate_scale(double _scale = 0.0) { + return SetField(VT_SCALE, _scale, 0.0); + } + int32_t zero_point() const { + return GetField(VT_ZERO_POINT, 0); + } + bool mutate_zero_point(int32_t _zero_point = 0) { + return SetField(VT_ZERO_POINT, _zero_point, 0); + } + const torch::jit::mobile::serialization::TensorMetadata *scales() const { + return GetPointer(VT_SCALES); + } + torch::jit::mobile::serialization::TensorMetadata *mutable_scales() { + return GetPointer(VT_SCALES); + } + const torch::jit::mobile::serialization::TensorMetadata *zero_points() const { + return GetPointer(VT_ZERO_POINTS); + } + torch::jit::mobile::serialization::TensorMetadata *mutable_zero_points() { + return GetPointer(VT_ZERO_POINTS); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool mutate_axis(int32_t _axis = 0) { + return SetField(VT_AXIS, _axis, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_QSCHEME, 1) && + VerifyField(verifier, VT_SCALE, 8) && + VerifyField(verifier, VT_ZERO_POINT, 4) && + VerifyOffset(verifier, VT_SCALES) && + verifier.VerifyTable(scales()) && + VerifyOffset(verifier, VT_ZERO_POINTS) && + verifier.VerifyTable(zero_points()) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } +}; + +struct QuantizedSchemaBuilder { + typedef QuantizedSchema Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_qscheme(int8_t qscheme) { + fbb_.AddElement(QuantizedSchema::VT_QSCHEME, qscheme, 0); + } + void add_scale(double scale) { + fbb_.AddElement(QuantizedSchema::VT_SCALE, scale, 0.0); + } + void add_zero_point(int32_t zero_point) { + 
fbb_.AddElement(QuantizedSchema::VT_ZERO_POINT, zero_point, 0); + } + void add_scales(::flatbuffers::Offset scales) { + fbb_.AddOffset(QuantizedSchema::VT_SCALES, scales); + } + void add_zero_points(::flatbuffers::Offset zero_points) { + fbb_.AddOffset(QuantizedSchema::VT_ZERO_POINTS, zero_points); + } + void add_axis(int32_t axis) { + fbb_.AddElement(QuantizedSchema::VT_AXIS, axis, 0); + } + explicit QuantizedSchemaBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateQuantizedSchema( + ::flatbuffers::FlatBufferBuilder &_fbb, + int8_t qscheme = 0, + double scale = 0.0, + int32_t zero_point = 0, + ::flatbuffers::Offset scales = 0, + ::flatbuffers::Offset zero_points = 0, + int32_t axis = 0) { + QuantizedSchemaBuilder builder_(_fbb); + builder_.add_scale(scale); + builder_.add_axis(axis); + builder_.add_zero_points(zero_points); + builder_.add_scales(scales); + builder_.add_zero_point(zero_point); + builder_.add_qscheme(qscheme); + return builder_.Finish(); +} + +struct TensorMetadata FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TensorMetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STORAGE_LOCATION_INDEX = 4, + VT_SCALAR_TYPE = 6, + VT_STORAGE_OFFSET = 8, + VT_SIZES = 10, + VT_STRIDES = 12, + VT_REQUIRES_GRAD = 14, + VT_QUANTIZED_SCHEMA = 16 + }; + uint32_t storage_location_index() const { + return GetField(VT_STORAGE_LOCATION_INDEX, 0); + } + bool mutate_storage_location_index(uint32_t _storage_location_index = 0) { + return SetField(VT_STORAGE_LOCATION_INDEX, _storage_location_index, 0); + } + int8_t scalar_type() const { + return GetField(VT_SCALAR_TYPE, 0); + } + bool mutate_scalar_type(int8_t _scalar_type = 0) { + return SetField(VT_SCALAR_TYPE, _scalar_type, 0); + } + 
int32_t storage_offset() const { + return GetField(VT_STORAGE_OFFSET, 0); + } + bool mutate_storage_offset(int32_t _storage_offset = 0) { + return SetField(VT_STORAGE_OFFSET, _storage_offset, 0); + } + const ::flatbuffers::Vector *sizes() const { + return GetPointer *>(VT_SIZES); + } + ::flatbuffers::Vector *mutable_sizes() { + return GetPointer<::flatbuffers::Vector *>(VT_SIZES); + } + const ::flatbuffers::Vector *strides() const { + return GetPointer *>(VT_STRIDES); + } + ::flatbuffers::Vector *mutable_strides() { + return GetPointer<::flatbuffers::Vector *>(VT_STRIDES); + } + bool requires_grad() const { + return GetField(VT_REQUIRES_GRAD, 0) != 0; + } + bool mutate_requires_grad(bool _requires_grad = 0) { + return SetField(VT_REQUIRES_GRAD, static_cast(_requires_grad), 0); + } + const torch::jit::mobile::serialization::QuantizedSchema *quantized_schema() const { + return GetPointer(VT_QUANTIZED_SCHEMA); + } + torch::jit::mobile::serialization::QuantizedSchema *mutable_quantized_schema() { + return GetPointer(VT_QUANTIZED_SCHEMA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_STORAGE_LOCATION_INDEX, 4) && + VerifyField(verifier, VT_SCALAR_TYPE, 1) && + VerifyField(verifier, VT_STORAGE_OFFSET, 4) && + VerifyOffset(verifier, VT_SIZES) && + verifier.VerifyVector(sizes()) && + VerifyOffset(verifier, VT_STRIDES) && + verifier.VerifyVector(strides()) && + VerifyField(verifier, VT_REQUIRES_GRAD, 1) && + VerifyOffset(verifier, VT_QUANTIZED_SCHEMA) && + verifier.VerifyTable(quantized_schema()) && + verifier.EndTable(); + } +}; + +struct TensorMetadataBuilder { + typedef TensorMetadata Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_storage_location_index(uint32_t storage_location_index) { + fbb_.AddElement(TensorMetadata::VT_STORAGE_LOCATION_INDEX, storage_location_index, 0); + } + void add_scalar_type(int8_t scalar_type) { + 
fbb_.AddElement(TensorMetadata::VT_SCALAR_TYPE, scalar_type, 0); + } + void add_storage_offset(int32_t storage_offset) { + fbb_.AddElement(TensorMetadata::VT_STORAGE_OFFSET, storage_offset, 0); + } + void add_sizes(::flatbuffers::Offset<::flatbuffers::Vector> sizes) { + fbb_.AddOffset(TensorMetadata::VT_SIZES, sizes); + } + void add_strides(::flatbuffers::Offset<::flatbuffers::Vector> strides) { + fbb_.AddOffset(TensorMetadata::VT_STRIDES, strides); + } + void add_requires_grad(bool requires_grad) { + fbb_.AddElement(TensorMetadata::VT_REQUIRES_GRAD, static_cast(requires_grad), 0); + } + void add_quantized_schema(::flatbuffers::Offset quantized_schema) { + fbb_.AddOffset(TensorMetadata::VT_QUANTIZED_SCHEMA, quantized_schema); + } + explicit TensorMetadataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTensorMetadata( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t storage_location_index = 0, + int8_t scalar_type = 0, + int32_t storage_offset = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> sizes = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> strides = 0, + bool requires_grad = false, + ::flatbuffers::Offset quantized_schema = 0) { + TensorMetadataBuilder builder_(_fbb); + builder_.add_quantized_schema(quantized_schema); + builder_.add_strides(strides); + builder_.add_sizes(sizes); + builder_.add_storage_offset(storage_offset); + builder_.add_storage_location_index(storage_location_index); + builder_.add_requires_grad(requires_grad); + builder_.add_scalar_type(scalar_type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTensorMetadataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t storage_location_index = 0, + int8_t scalar_type = 0, + int32_t storage_offset = 0, + const std::vector *sizes = 
nullptr, + const std::vector *strides = nullptr, + bool requires_grad = false, + ::flatbuffers::Offset quantized_schema = 0) { + auto sizes__ = sizes ? _fbb.CreateVector(*sizes) : 0; + auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; + return torch::jit::mobile::serialization::CreateTensorMetadata( + _fbb, + storage_location_index, + scalar_type, + storage_offset, + sizes__, + strides__, + requires_grad, + quantized_schema); +} + +struct String FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StringBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const ::flatbuffers::String *data() const { + return GetPointer(VT_DATA); + } + ::flatbuffers::String *mutable_data() { + return GetPointer<::flatbuffers::String *>(VT_DATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyString(data()) && + verifier.EndTable(); + } +}; + +struct StringBuilder { + typedef String Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_data(::flatbuffers::Offset<::flatbuffers::String> data) { + fbb_.AddOffset(String::VT_DATA, data); + } + explicit StringBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateString( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> data = 0) { + StringBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStringDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *data = nullptr) { + auto data__ = data ? 
_fbb.CreateString(data) : 0; + return torch::jit::mobile::serialization::CreateString( + _fbb, + data__); +} + +struct Device FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DeviceBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_STR = 4 + }; + const ::flatbuffers::String *str() const { + return GetPointer(VT_STR); + } + ::flatbuffers::String *mutable_str() { + return GetPointer<::flatbuffers::String *>(VT_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_STR) && + verifier.VerifyString(str()) && + verifier.EndTable(); + } +}; + +struct DeviceBuilder { + typedef Device Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_str(::flatbuffers::Offset<::flatbuffers::String> str) { + fbb_.AddOffset(Device::VT_STR, str); + } + explicit DeviceBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDevice( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> str = 0) { + DeviceBuilder builder_(_fbb); + builder_.add_str(str); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDeviceDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *str = nullptr) { + auto str__ = str ? 
_fbb.CreateString(str) : 0; + return torch::jit::mobile::serialization::CreateDevice( + _fbb, + str__); +} + +struct List FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4, + VT_ANNOTATION_STR = 6 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + const ::flatbuffers::String *annotation_str() const { + return GetPointer(VT_ANNOTATION_STR); + } + ::flatbuffers::String *mutable_annotation_str() { + return GetPointer<::flatbuffers::String *>(VT_ANNOTATION_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + VerifyOffset(verifier, VT_ANNOTATION_STR) && + verifier.VerifyString(annotation_str()) && + verifier.EndTable(); + } +}; + +struct ListBuilder { + typedef List Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(List::VT_ITEMS, items); + } + void add_annotation_str(::flatbuffers::Offset<::flatbuffers::String> annotation_str) { + fbb_.AddOffset(List::VT_ANNOTATION_STR, annotation_str); + } + explicit ListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0, + ::flatbuffers::Offset<::flatbuffers::String> annotation_str = 0) { + ListBuilder builder_(_fbb); + builder_.add_annotation_str(annotation_str); + builder_.add_items(items); + return 
builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr, + const char *annotation_str = nullptr) { + auto items__ = items ? _fbb.CreateVector(*items) : 0; + auto annotation_str__ = annotation_str ? _fbb.CreateString(annotation_str) : 0; + return torch::jit::mobile::serialization::CreateList( + _fbb, + items__, + annotation_str__); +} + +struct IntList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef IntListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct IntListBuilder { + typedef IntList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(IntList::VT_ITEMS, items); + } + explicit IntListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateIntList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + IntListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateIntListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateIntList( + _fbb, + items__); +} + +struct DoubleList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DoubleListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct DoubleListBuilder { + typedef DoubleList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(DoubleList::VT_ITEMS, items); + } + explicit DoubleListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDoubleList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + DoubleListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDoubleListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateDoubleList( + _fbb, + items__); +} + +struct BoolList FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef BoolListBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct BoolListBuilder { + typedef BoolList Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(BoolList::VT_ITEMS, items); + } + explicit BoolListBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateBoolList( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + BoolListBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateBoolListDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateBoolList( + _fbb, + items__); +} + +struct Tuple FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef TupleBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ITEMS = 4 + }; + const ::flatbuffers::Vector *items() const { + return GetPointer *>(VT_ITEMS); + } + ::flatbuffers::Vector *mutable_items() { + return GetPointer<::flatbuffers::Vector *>(VT_ITEMS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ITEMS) && + verifier.VerifyVector(items()) && + verifier.EndTable(); + } +}; + +struct TupleBuilder { + typedef Tuple Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_items(::flatbuffers::Offset<::flatbuffers::Vector> items) { + fbb_.AddOffset(Tuple::VT_ITEMS, items); + } + explicit TupleBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateTuple( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> items = 0) { + TupleBuilder builder_(_fbb); + builder_.add_items(items); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateTupleDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *items = nullptr) { + auto items__ = items ? 
_fbb.CreateVector(*items) : 0; + return torch::jit::mobile::serialization::CreateTuple( + _fbb, + items__); +} + +struct Dict FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DictBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEYS = 4, + VT_VALUES = 6, + VT_ANNOTATION_STR = 8 + }; + const ::flatbuffers::Vector *keys() const { + return GetPointer *>(VT_KEYS); + } + ::flatbuffers::Vector *mutable_keys() { + return GetPointer<::flatbuffers::Vector *>(VT_KEYS); + } + const ::flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + ::flatbuffers::Vector *mutable_values() { + return GetPointer<::flatbuffers::Vector *>(VT_VALUES); + } + const ::flatbuffers::String *annotation_str() const { + return GetPointer(VT_ANNOTATION_STR); + } + ::flatbuffers::String *mutable_annotation_str() { + return GetPointer<::flatbuffers::String *>(VT_ANNOTATION_STR); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_KEYS) && + verifier.VerifyVector(keys()) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + VerifyOffset(verifier, VT_ANNOTATION_STR) && + verifier.VerifyString(annotation_str()) && + verifier.EndTable(); + } +}; + +struct DictBuilder { + typedef Dict Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_keys(::flatbuffers::Offset<::flatbuffers::Vector> keys) { + fbb_.AddOffset(Dict::VT_KEYS, keys); + } + void add_values(::flatbuffers::Offset<::flatbuffers::Vector> values) { + fbb_.AddOffset(Dict::VT_VALUES, values); + } + void add_annotation_str(::flatbuffers::Offset<::flatbuffers::String> annotation_str) { + fbb_.AddOffset(Dict::VT_ANNOTATION_STR, annotation_str); + } + explicit DictBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDict( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> keys = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> values = 0, + ::flatbuffers::Offset<::flatbuffers::String> annotation_str = 0) { + DictBuilder builder_(_fbb); + builder_.add_annotation_str(annotation_str); + builder_.add_values(values); + builder_.add_keys(keys); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDictDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *keys = nullptr, + const std::vector *values = nullptr, + const char *annotation_str = nullptr) { + auto keys__ = keys ? _fbb.CreateVector(*keys) : 0; + auto values__ = values ? _fbb.CreateVector(*values) : 0; + auto annotation_str__ = annotation_str ? _fbb.CreateString(annotation_str) : 0; + return torch::jit::mobile::serialization::CreateDict( + _fbb, + keys__, + values__, + annotation_str__); +} + +struct ObjectType FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ObjectTypeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_NAME = 4, + VT_TYPE = 6, + VT_ATTR_NAMES = 8 + }; + const ::flatbuffers::String *type_name() const { + return GetPointer(VT_TYPE_NAME); + } + ::flatbuffers::String *mutable_type_name() { + return GetPointer<::flatbuffers::String *>(VT_TYPE_NAME); + } + torch::jit::mobile::serialization::TypeType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool mutate_type(torch::jit::mobile::serialization::TypeType _type = static_cast(0)) { + return SetField(VT_TYPE, static_cast(_type), 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *attr_names() const { + return GetPointer> *>(VT_ATTR_NAMES); + } + ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *mutable_attr_names() { + return 
GetPointer<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_ATTR_NAMES); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE_NAME) && + verifier.VerifyString(type_name()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyOffset(verifier, VT_ATTR_NAMES) && + verifier.VerifyVector(attr_names()) && + verifier.VerifyVectorOfStrings(attr_names()) && + verifier.EndTable(); + } +}; + +struct ObjectTypeBuilder { + typedef ObjectType Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_name(::flatbuffers::Offset<::flatbuffers::String> type_name) { + fbb_.AddOffset(ObjectType::VT_TYPE_NAME, type_name); + } + void add_type(torch::jit::mobile::serialization::TypeType type) { + fbb_.AddElement(ObjectType::VT_TYPE, static_cast(type), 0); + } + void add_attr_names(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> attr_names) { + fbb_.AddOffset(ObjectType::VT_ATTR_NAMES, attr_names); + } + explicit ObjectTypeBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateObjectType( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> type_name = 0, + torch::jit::mobile::serialization::TypeType type = torch::jit::mobile::serialization::TypeType::UNSET, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> attr_names = 0) { + ObjectTypeBuilder builder_(_fbb); + builder_.add_attr_names(attr_names); + builder_.add_type_name(type_name); + builder_.add_type(type); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateObjectTypeDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *type_name = 
nullptr, + torch::jit::mobile::serialization::TypeType type = torch::jit::mobile::serialization::TypeType::UNSET, + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *attr_names = nullptr) { + auto type_name__ = type_name ? _fbb.CreateString(type_name) : 0; + auto attr_names__ = attr_names ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*attr_names) : 0; + return torch::jit::mobile::serialization::CreateObjectType( + _fbb, + type_name__, + type, + attr_names__); +} + +struct Object FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ObjectBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_INDEX = 4, + VT_STATE = 6, + VT_ATTRS = 8, + VT_SETSTATE_FUNC = 10 + }; + uint32_t type_index() const { + return GetField(VT_TYPE_INDEX, 0); + } + bool mutate_type_index(uint32_t _type_index = 0) { + return SetField(VT_TYPE_INDEX, _type_index, 0); + } + uint32_t state() const { + return GetField(VT_STATE, 0); + } + bool mutate_state(uint32_t _state = 0) { + return SetField(VT_STATE, _state, 0); + } + const ::flatbuffers::Vector *attrs() const { + return GetPointer *>(VT_ATTRS); + } + ::flatbuffers::Vector *mutable_attrs() { + return GetPointer<::flatbuffers::Vector *>(VT_ATTRS); + } + uint32_t setstate_func() const { + return GetField(VT_SETSTATE_FUNC, 0); + } + bool mutate_setstate_func(uint32_t _setstate_func = 0) { + return SetField(VT_SETSTATE_FUNC, _setstate_func, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE_INDEX, 4) && + VerifyField(verifier, VT_STATE, 4) && + VerifyOffset(verifier, VT_ATTRS) && + verifier.VerifyVector(attrs()) && + VerifyField(verifier, VT_SETSTATE_FUNC, 4) && + verifier.EndTable(); + } +}; + +struct ObjectBuilder { + typedef Object Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_index(uint32_t type_index) { + 
fbb_.AddElement(Object::VT_TYPE_INDEX, type_index, 0); + } + void add_state(uint32_t state) { + fbb_.AddElement(Object::VT_STATE, state, 0); + } + void add_attrs(::flatbuffers::Offset<::flatbuffers::Vector> attrs) { + fbb_.AddOffset(Object::VT_ATTRS, attrs); + } + void add_setstate_func(uint32_t setstate_func) { + fbb_.AddElement(Object::VT_SETSTATE_FUNC, setstate_func, 0); + } + explicit ObjectBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateObject( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t type_index = 0, + uint32_t state = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> attrs = 0, + uint32_t setstate_func = 0) { + ObjectBuilder builder_(_fbb); + builder_.add_setstate_func(setstate_func); + builder_.add_attrs(attrs); + builder_.add_state(state); + builder_.add_type_index(type_index); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateObjectDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t type_index = 0, + uint32_t state = 0, + const std::vector *attrs = nullptr, + uint32_t setstate_func = 0) { + auto attrs__ = attrs ? 
_fbb.CreateVector(*attrs) : 0; + return torch::jit::mobile::serialization::CreateObject( + _fbb, + type_index, + state, + attrs__, + setstate_func); +} + +struct EnumValue FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef EnumValueBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE_NAME = 4, + VT_VALUE = 6 + }; + const ::flatbuffers::String *type_name() const { + return GetPointer(VT_TYPE_NAME); + } + ::flatbuffers::String *mutable_type_name() { + return GetPointer<::flatbuffers::String *>(VT_TYPE_NAME); + } + uint32_t value() const { + return GetField(VT_VALUE, 0); + } + bool mutate_value(uint32_t _value = 0) { + return SetField(VT_VALUE, _value, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TYPE_NAME) && + verifier.VerifyString(type_name()) && + VerifyField(verifier, VT_VALUE, 4) && + verifier.EndTable(); + } +}; + +struct EnumValueBuilder { + typedef EnumValue Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_type_name(::flatbuffers::Offset<::flatbuffers::String> type_name) { + fbb_.AddOffset(EnumValue::VT_TYPE_NAME, type_name); + } + void add_value(uint32_t value) { + fbb_.AddElement(EnumValue::VT_VALUE, value, 0); + } + explicit EnumValueBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateEnumValue( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> type_name = 0, + uint32_t value = 0) { + EnumValueBuilder builder_(_fbb); + builder_.add_value(value); + builder_.add_type_name(type_name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateEnumValueDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + 
const char *type_name = nullptr, + uint32_t value = 0) { + auto type_name__ = type_name ? _fbb.CreateString(type_name) : 0; + return torch::jit::mobile::serialization::CreateEnumValue( + _fbb, + type_name__, + value); +} + +struct Operator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef OperatorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_OVERLOAD_NAME = 6, + VT_NUM_ARGS_SERIALIZED = 8 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *overload_name() const { + return GetPointer(VT_OVERLOAD_NAME); + } + ::flatbuffers::String *mutable_overload_name() { + return GetPointer<::flatbuffers::String *>(VT_OVERLOAD_NAME); + } + int32_t num_args_serialized() const { + return GetField(VT_NUM_ARGS_SERIALIZED, -1); + } + bool mutate_num_args_serialized(int32_t _num_args_serialized = -1) { + return SetField(VT_NUM_ARGS_SERIALIZED, _num_args_serialized, -1); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_OVERLOAD_NAME) && + verifier.VerifyString(overload_name()) && + VerifyField(verifier, VT_NUM_ARGS_SERIALIZED, 4) && + verifier.EndTable(); + } +}; + +struct OperatorBuilder { + typedef Operator Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Operator::VT_NAME, name); + } + void add_overload_name(::flatbuffers::Offset<::flatbuffers::String> overload_name) { + fbb_.AddOffset(Operator::VT_OVERLOAD_NAME, overload_name); + } + void add_num_args_serialized(int32_t num_args_serialized) { + fbb_.AddElement(Operator::VT_NUM_ARGS_SERIALIZED, num_args_serialized, -1); + } + 
explicit OperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateOperator( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> overload_name = 0, + int32_t num_args_serialized = -1) { + OperatorBuilder builder_(_fbb); + builder_.add_num_args_serialized(num_args_serialized); + builder_.add_overload_name(overload_name); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateOperatorDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *overload_name = nullptr, + int32_t num_args_serialized = -1) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto overload_name__ = overload_name ? _fbb.CreateString(overload_name) : 0; + return torch::jit::mobile::serialization::CreateOperator( + _fbb, + name__, + overload_name__, + num_args_serialized); +} + +struct Arg FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ArgBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_TYPE = 6, + VT_DEFAULT_VALUE = 8 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *type() const { + return GetPointer(VT_TYPE); + } + ::flatbuffers::String *mutable_type() { + return GetPointer<::flatbuffers::String *>(VT_TYPE); + } + uint32_t default_value() const { + return GetField(VT_DEFAULT_VALUE, 0); + } + bool mutate_default_value(uint32_t _default_value = 0) { + return SetField(VT_DEFAULT_VALUE, _default_value, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { 
+ return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_TYPE) && + verifier.VerifyString(type()) && + VerifyField(verifier, VT_DEFAULT_VALUE, 4) && + verifier.EndTable(); + } +}; + +struct ArgBuilder { + typedef Arg Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(Arg::VT_NAME, name); + } + void add_type(::flatbuffers::Offset<::flatbuffers::String> type) { + fbb_.AddOffset(Arg::VT_TYPE, type); + } + void add_default_value(uint32_t default_value) { + fbb_.AddElement(Arg::VT_DEFAULT_VALUE, default_value, 0); + } + explicit ArgBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateArg( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> type = 0, + uint32_t default_value = 0) { + ArgBuilder builder_(_fbb); + builder_.add_default_value(default_value); + builder_.add_type(type); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateArgDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *type = nullptr, + uint32_t default_value = 0) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto type__ = type ? 
_fbb.CreateString(type) : 0; + return torch::jit::mobile::serialization::CreateArg( + _fbb, + name__, + type__, + default_value); +} + +struct Schema FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef SchemaBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ARGUMENTS = 4, + VT_RETURNS = 6 + }; + const ::flatbuffers::Vector<::flatbuffers::Offset> *arguments() const { + return GetPointer> *>(VT_ARGUMENTS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_arguments() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_ARGUMENTS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *returns() const { + return GetPointer> *>(VT_RETURNS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_returns() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_RETURNS); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_ARGUMENTS) && + verifier.VerifyVector(arguments()) && + verifier.VerifyVectorOfTables(arguments()) && + VerifyOffset(verifier, VT_RETURNS) && + verifier.VerifyVector(returns()) && + verifier.VerifyVectorOfTables(returns()) && + verifier.EndTable(); + } +}; + +struct SchemaBuilder { + typedef Schema Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_arguments(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> arguments) { + fbb_.AddOffset(Schema::VT_ARGUMENTS, arguments); + } + void add_returns(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> returns) { + fbb_.AddOffset(Schema::VT_RETURNS, returns); + } + explicit SchemaBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset 
CreateSchema( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> arguments = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> returns = 0) { + SchemaBuilder builder_(_fbb); + builder_.add_returns(returns); + builder_.add_arguments(arguments); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateSchemaDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector<::flatbuffers::Offset> *arguments = nullptr, + const std::vector<::flatbuffers::Offset> *returns = nullptr) { + auto arguments__ = arguments ? _fbb.CreateVector<::flatbuffers::Offset>(*arguments) : 0; + auto returns__ = returns ? _fbb.CreateVector<::flatbuffers::Offset>(*returns) : 0; + return torch::jit::mobile::serialization::CreateSchema( + _fbb, + arguments__, + returns__); +} + +struct DebugInfo FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef DebugInfoBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEBUG_HANDLE = 4 + }; + const ::flatbuffers::Vector *debug_handle() const { + return GetPointer *>(VT_DEBUG_HANDLE); + } + ::flatbuffers::Vector *mutable_debug_handle() { + return GetPointer<::flatbuffers::Vector *>(VT_DEBUG_HANDLE); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DEBUG_HANDLE) && + verifier.VerifyVector(debug_handle()) && + verifier.EndTable(); + } +}; + +struct DebugInfoBuilder { + typedef DebugInfo Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_debug_handle(::flatbuffers::Offset<::flatbuffers::Vector> debug_handle) { + fbb_.AddOffset(DebugInfo::VT_DEBUG_HANDLE, debug_handle); + } + explicit DebugInfoBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateDebugInfo( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> debug_handle = 0) { + DebugInfoBuilder builder_(_fbb); + builder_.add_debug_handle(debug_handle); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateDebugInfoDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *debug_handle = nullptr) { + auto debug_handle__ = debug_handle ? _fbb.CreateVector(*debug_handle) : 0; + return torch::jit::mobile::serialization::CreateDebugInfo( + _fbb, + debug_handle__); +} + +struct Function FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef FunctionBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_QN = 4, + VT_INSTRUCTIONS = 6, + VT_OPERATORS = 8, + VT_CONSTANTS = 10, + VT_TYPE_ANNOTATIONS = 12, + VT_REGISTER_SIZE = 14, + VT_SCHEMA = 16, + VT_DEBUG_INFO = 18, + VT_CLASS_TYPE = 20 + }; + const ::flatbuffers::String *qn() const { + return GetPointer(VT_QN); + } + ::flatbuffers::String *mutable_qn() { + return GetPointer<::flatbuffers::String *>(VT_QN); + } + const ::flatbuffers::Vector *instructions() const { + return GetPointer *>(VT_INSTRUCTIONS); + } + ::flatbuffers::Vector *mutable_instructions() { + return GetPointer<::flatbuffers::Vector *>(VT_INSTRUCTIONS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *operators() const { + return GetPointer> *>(VT_OPERATORS); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_operators() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OPERATORS); + } + const ::flatbuffers::Vector *constants() const { + return GetPointer *>(VT_CONSTANTS); + } + ::flatbuffers::Vector *mutable_constants() { + return GetPointer<::flatbuffers::Vector *>(VT_CONSTANTS); + } + const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *type_annotations() const { + return GetPointer> 
*>(VT_TYPE_ANNOTATIONS); + } + ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *mutable_type_annotations() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_TYPE_ANNOTATIONS); + } + int32_t register_size() const { + return GetField(VT_REGISTER_SIZE, 0); + } + bool mutate_register_size(int32_t _register_size = 0) { + return SetField(VT_REGISTER_SIZE, _register_size, 0); + } + const torch::jit::mobile::serialization::Schema *schema() const { + return GetPointer(VT_SCHEMA); + } + torch::jit::mobile::serialization::Schema *mutable_schema() { + return GetPointer(VT_SCHEMA); + } + const torch::jit::mobile::serialization::DebugInfo *debug_info() const { + return GetPointer(VT_DEBUG_INFO); + } + torch::jit::mobile::serialization::DebugInfo *mutable_debug_info() { + return GetPointer(VT_DEBUG_INFO); + } + uint32_t class_type() const { + return GetField(VT_CLASS_TYPE, 0); + } + bool mutate_class_type(uint32_t _class_type = 0) { + return SetField(VT_CLASS_TYPE, _class_type, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_QN) && + verifier.VerifyString(qn()) && + VerifyOffset(verifier, VT_INSTRUCTIONS) && + verifier.VerifyVector(instructions()) && + VerifyOffset(verifier, VT_OPERATORS) && + verifier.VerifyVector(operators()) && + verifier.VerifyVectorOfTables(operators()) && + VerifyOffset(verifier, VT_CONSTANTS) && + verifier.VerifyVector(constants()) && + VerifyOffset(verifier, VT_TYPE_ANNOTATIONS) && + verifier.VerifyVector(type_annotations()) && + verifier.VerifyVectorOfStrings(type_annotations()) && + VerifyField(verifier, VT_REGISTER_SIZE, 4) && + VerifyOffset(verifier, VT_SCHEMA) && + verifier.VerifyTable(schema()) && + VerifyOffset(verifier, VT_DEBUG_INFO) && + verifier.VerifyTable(debug_info()) && + VerifyField(verifier, VT_CLASS_TYPE, 4) && + verifier.EndTable(); + } +}; + +struct FunctionBuilder { + typedef Function 
Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_qn(::flatbuffers::Offset<::flatbuffers::String> qn) { + fbb_.AddOffset(Function::VT_QN, qn); + } + void add_instructions(::flatbuffers::Offset<::flatbuffers::Vector> instructions) { + fbb_.AddOffset(Function::VT_INSTRUCTIONS, instructions); + } + void add_operators(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators) { + fbb_.AddOffset(Function::VT_OPERATORS, operators); + } + void add_constants(::flatbuffers::Offset<::flatbuffers::Vector> constants) { + fbb_.AddOffset(Function::VT_CONSTANTS, constants); + } + void add_type_annotations(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> type_annotations) { + fbb_.AddOffset(Function::VT_TYPE_ANNOTATIONS, type_annotations); + } + void add_register_size(int32_t register_size) { + fbb_.AddElement(Function::VT_REGISTER_SIZE, register_size, 0); + } + void add_schema(::flatbuffers::Offset schema) { + fbb_.AddOffset(Function::VT_SCHEMA, schema); + } + void add_debug_info(::flatbuffers::Offset debug_info) { + fbb_.AddOffset(Function::VT_DEBUG_INFO, debug_info); + } + void add_class_type(uint32_t class_type) { + fbb_.AddElement(Function::VT_CLASS_TYPE, class_type, 0); + } + explicit FunctionBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateFunction( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> qn = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> instructions = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> operators = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> constants = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> 
type_annotations = 0, + int32_t register_size = 0, + ::flatbuffers::Offset schema = 0, + ::flatbuffers::Offset debug_info = 0, + uint32_t class_type = 0) { + FunctionBuilder builder_(_fbb); + builder_.add_class_type(class_type); + builder_.add_debug_info(debug_info); + builder_.add_schema(schema); + builder_.add_register_size(register_size); + builder_.add_type_annotations(type_annotations); + builder_.add_constants(constants); + builder_.add_operators(operators); + builder_.add_instructions(instructions); + builder_.add_qn(qn); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateFunctionDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *qn = nullptr, + const std::vector *instructions = nullptr, + const std::vector<::flatbuffers::Offset> *operators = nullptr, + const std::vector *constants = nullptr, + const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *type_annotations = nullptr, + int32_t register_size = 0, + ::flatbuffers::Offset schema = 0, + ::flatbuffers::Offset debug_info = 0, + uint32_t class_type = 0) { + auto qn__ = qn ? _fbb.CreateString(qn) : 0; + auto instructions__ = instructions ? _fbb.CreateVectorOfStructs(*instructions) : 0; + auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset>(*operators) : 0; + auto constants__ = constants ? _fbb.CreateVector(*constants) : 0; + auto type_annotations__ = type_annotations ? 
_fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*type_annotations) : 0; + return torch::jit::mobile::serialization::CreateFunction( + _fbb, + qn__, + instructions__, + operators__, + constants__, + type_annotations__, + register_size, + schema, + debug_info, + class_type); +} + +struct StorageData FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef StorageDataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const ::flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); + } + ::flatbuffers::Vector *mutable_data() { + return GetPointer<::flatbuffers::Vector *>(VT_DATA); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + verifier.EndTable(); + } +}; + +struct StorageDataBuilder { + typedef StorageData Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_data(::flatbuffers::Offset<::flatbuffers::Vector> data) { + fbb_.AddOffset(StorageData::VT_DATA, data); + } + explicit StorageDataBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateStorageData( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::Vector> data = 0) { + StorageDataBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateStorageDataDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *data = nullptr) { + if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } + auto data__ = data ? 
_fbb.CreateVector(*data) : 0; + return torch::jit::mobile::serialization::CreateStorageData( + _fbb, + data__); +} + +struct IValue FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef IValueBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VAL_TYPE = 4, + VT_VAL = 6 + }; + torch::jit::mobile::serialization::IValueUnion val_type() const { + return static_cast(GetField(VT_VAL_TYPE, 0)); + } + const void *val() const { + return GetPointer(VT_VAL); + } + template const T *val_as() const; + const torch::jit::mobile::serialization::Int *val_as_Int() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Int ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Bool *val_as_Bool() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Bool ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Double *val_as_Double() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Double ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::ComplexDouble *val_as_ComplexDouble() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::ComplexDouble ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::TensorMetadata *val_as_TensorMetadata() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::TensorMetadata ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::String *val_as_String() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::String ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::List *val_as_List() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::List ? 
static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Tuple *val_as_Tuple() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Tuple ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Dict *val_as_Dict() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Dict ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Object *val_as_Object() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Object ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::IntList *val_as_IntList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::IntList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::DoubleList *val_as_DoubleList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::DoubleList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::BoolList *val_as_BoolList() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::BoolList ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Device *val_as_Device() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Device ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::EnumValue *val_as_EnumValue() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::EnumValue ? static_cast(val()) : nullptr; + } + const torch::jit::mobile::serialization::Function *val_as_Function() const { + return val_type() == torch::jit::mobile::serialization::IValueUnion::Function ? 
static_cast(val()) : nullptr; + } + void *mutable_val() { + return GetPointer(VT_VAL); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VAL_TYPE, 1) && + VerifyOffset(verifier, VT_VAL) && + VerifyIValueUnion(verifier, val(), val_type()) && + verifier.EndTable(); + } +}; + +template<> inline const torch::jit::mobile::serialization::Int *IValue::val_as() const { + return val_as_Int(); +} + +template<> inline const torch::jit::mobile::serialization::Bool *IValue::val_as() const { + return val_as_Bool(); +} + +template<> inline const torch::jit::mobile::serialization::Double *IValue::val_as() const { + return val_as_Double(); +} + +template<> inline const torch::jit::mobile::serialization::ComplexDouble *IValue::val_as() const { + return val_as_ComplexDouble(); +} + +template<> inline const torch::jit::mobile::serialization::TensorMetadata *IValue::val_as() const { + return val_as_TensorMetadata(); +} + +template<> inline const torch::jit::mobile::serialization::String *IValue::val_as() const { + return val_as_String(); +} + +template<> inline const torch::jit::mobile::serialization::List *IValue::val_as() const { + return val_as_List(); +} + +template<> inline const torch::jit::mobile::serialization::Tuple *IValue::val_as() const { + return val_as_Tuple(); +} + +template<> inline const torch::jit::mobile::serialization::Dict *IValue::val_as() const { + return val_as_Dict(); +} + +template<> inline const torch::jit::mobile::serialization::Object *IValue::val_as() const { + return val_as_Object(); +} + +template<> inline const torch::jit::mobile::serialization::IntList *IValue::val_as() const { + return val_as_IntList(); +} + +template<> inline const torch::jit::mobile::serialization::DoubleList *IValue::val_as() const { + return val_as_DoubleList(); +} + +template<> inline const torch::jit::mobile::serialization::BoolList *IValue::val_as() const { + return val_as_BoolList(); +} + +template<> 
inline const torch::jit::mobile::serialization::Device *IValue::val_as() const { + return val_as_Device(); +} + +template<> inline const torch::jit::mobile::serialization::EnumValue *IValue::val_as() const { + return val_as_EnumValue(); +} + +template<> inline const torch::jit::mobile::serialization::Function *IValue::val_as() const { + return val_as_Function(); +} + +struct IValueBuilder { + typedef IValue Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_val_type(torch::jit::mobile::serialization::IValueUnion val_type) { + fbb_.AddElement(IValue::VT_VAL_TYPE, static_cast(val_type), 0); + } + void add_val(::flatbuffers::Offset val) { + fbb_.AddOffset(IValue::VT_VAL, val); + } + explicit IValueBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateIValue( + ::flatbuffers::FlatBufferBuilder &_fbb, + torch::jit::mobile::serialization::IValueUnion val_type = torch::jit::mobile::serialization::IValueUnion::NONE, + ::flatbuffers::Offset val = 0) { + IValueBuilder builder_(_fbb); + builder_.add_val(val); + builder_.add_val_type(val_type); + return builder_.Finish(); +} + +struct ExtraFile FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ExtraFileBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NAME = 4, + VT_CONTENT = 6 + }; + const ::flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + ::flatbuffers::String *mutable_name() { + return GetPointer<::flatbuffers::String *>(VT_NAME); + } + const ::flatbuffers::String *content() const { + return GetPointer(VT_CONTENT); + } + ::flatbuffers::String *mutable_content() { + return GetPointer<::flatbuffers::String *>(VT_CONTENT); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + 
return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_CONTENT) && + verifier.VerifyString(content()) && + verifier.EndTable(); + } +}; + +struct ExtraFileBuilder { + typedef ExtraFile Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_name(::flatbuffers::Offset<::flatbuffers::String> name) { + fbb_.AddOffset(ExtraFile::VT_NAME, name); + } + void add_content(::flatbuffers::Offset<::flatbuffers::String> content) { + fbb_.AddOffset(ExtraFile::VT_CONTENT, content); + } + explicit ExtraFileBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateExtraFile( + ::flatbuffers::FlatBufferBuilder &_fbb, + ::flatbuffers::Offset<::flatbuffers::String> name = 0, + ::flatbuffers::Offset<::flatbuffers::String> content = 0) { + ExtraFileBuilder builder_(_fbb); + builder_.add_content(content); + builder_.add_name(name); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateExtraFileDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + const char *name = nullptr, + const char *content = nullptr) { + auto name__ = name ? _fbb.CreateString(name) : 0; + auto content__ = content ? 
_fbb.CreateString(content) : 0; + return torch::jit::mobile::serialization::CreateExtraFile( + _fbb, + name__, + content__); +} + +struct Module FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table { + typedef ModuleBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BYTECODE_VERSION = 4, + VT_EXTRA_FILES = 6, + VT_METHODS = 8, + VT_STATE_OBJ = 10, + VT_IVALUES = 12, + VT_STORAGE_DATA_SIZE = 14, + VT_STORAGE_DATA = 16, + VT_OBJECT_TYPES = 18, + VT_JIT_SOURCES = 20, + VT_JIT_CONSTANTS = 22, + VT_OPERATOR_VERSION = 24, + VT_MOBILE_IVALUE_SIZE = 26 + }; + uint32_t bytecode_version() const { + return GetField(VT_BYTECODE_VERSION, 0); + } + bool mutate_bytecode_version(uint32_t _bytecode_version = 0) { + return SetField(VT_BYTECODE_VERSION, _bytecode_version, 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *extra_files() const { + return GetPointer> *>(VT_EXTRA_FILES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_extra_files() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_EXTRA_FILES); + } + const ::flatbuffers::Vector *methods() const { + return GetPointer *>(VT_METHODS); + } + ::flatbuffers::Vector *mutable_methods() { + return GetPointer<::flatbuffers::Vector *>(VT_METHODS); + } + uint32_t state_obj() const { + return GetField(VT_STATE_OBJ, 0); + } + bool mutate_state_obj(uint32_t _state_obj = 0) { + return SetField(VT_STATE_OBJ, _state_obj, 0); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *ivalues() const { + return GetPointer> *>(VT_IVALUES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_ivalues() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_IVALUES); + } + int32_t storage_data_size() const { + return GetField(VT_STORAGE_DATA_SIZE, 0); + } + bool mutate_storage_data_size(int32_t _storage_data_size = 0) { + return SetField(VT_STORAGE_DATA_SIZE, _storage_data_size, 0); + } + const 
::flatbuffers::Vector<::flatbuffers::Offset> *storage_data() const { + return GetPointer> *>(VT_STORAGE_DATA); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_storage_data() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_STORAGE_DATA); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *object_types() const { + return GetPointer> *>(VT_OBJECT_TYPES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_object_types() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_OBJECT_TYPES); + } + const ::flatbuffers::Vector<::flatbuffers::Offset> *jit_sources() const { + return GetPointer> *>(VT_JIT_SOURCES); + } + ::flatbuffers::Vector<::flatbuffers::Offset> *mutable_jit_sources() { + return GetPointer<::flatbuffers::Vector<::flatbuffers::Offset> *>(VT_JIT_SOURCES); + } + const ::flatbuffers::Vector *jit_constants() const { + return GetPointer *>(VT_JIT_CONSTANTS); + } + ::flatbuffers::Vector *mutable_jit_constants() { + return GetPointer<::flatbuffers::Vector *>(VT_JIT_CONSTANTS); + } + uint32_t operator_version() const { + return GetField(VT_OPERATOR_VERSION, 0); + } + bool mutate_operator_version(uint32_t _operator_version = 0) { + return SetField(VT_OPERATOR_VERSION, _operator_version, 0); + } + uint32_t mobile_ivalue_size() const { + return GetField(VT_MOBILE_IVALUE_SIZE, 0); + } + bool mutate_mobile_ivalue_size(uint32_t _mobile_ivalue_size = 0) { + return SetField(VT_MOBILE_IVALUE_SIZE, _mobile_ivalue_size, 0); + } + bool Verify(::flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BYTECODE_VERSION, 4) && + VerifyOffset(verifier, VT_EXTRA_FILES) && + verifier.VerifyVector(extra_files()) && + verifier.VerifyVectorOfTables(extra_files()) && + VerifyOffset(verifier, VT_METHODS) && + verifier.VerifyVector(methods()) && + VerifyField(verifier, VT_STATE_OBJ, 4) && + VerifyOffset(verifier, VT_IVALUES) && + verifier.VerifyVector(ivalues()) && + 
verifier.VerifyVectorOfTables(ivalues()) && + VerifyField(verifier, VT_STORAGE_DATA_SIZE, 4) && + VerifyOffset(verifier, VT_STORAGE_DATA) && + verifier.VerifyVector(storage_data()) && + verifier.VerifyVectorOfTables(storage_data()) && + VerifyOffset(verifier, VT_OBJECT_TYPES) && + verifier.VerifyVector(object_types()) && + verifier.VerifyVectorOfTables(object_types()) && + VerifyOffset(verifier, VT_JIT_SOURCES) && + verifier.VerifyVector(jit_sources()) && + verifier.VerifyVectorOfTables(jit_sources()) && + VerifyOffset(verifier, VT_JIT_CONSTANTS) && + verifier.VerifyVector(jit_constants()) && + VerifyField(verifier, VT_OPERATOR_VERSION, 4) && + VerifyField(verifier, VT_MOBILE_IVALUE_SIZE, 4) && + verifier.EndTable(); + } +}; + +struct ModuleBuilder { + typedef Module Table; + ::flatbuffers::FlatBufferBuilder &fbb_; + ::flatbuffers::uoffset_t start_; + void add_bytecode_version(uint32_t bytecode_version) { + fbb_.AddElement(Module::VT_BYTECODE_VERSION, bytecode_version, 0); + } + void add_extra_files(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> extra_files) { + fbb_.AddOffset(Module::VT_EXTRA_FILES, extra_files); + } + void add_methods(::flatbuffers::Offset<::flatbuffers::Vector> methods) { + fbb_.AddOffset(Module::VT_METHODS, methods); + } + void add_state_obj(uint32_t state_obj) { + fbb_.AddElement(Module::VT_STATE_OBJ, state_obj, 0); + } + void add_ivalues(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> ivalues) { + fbb_.AddOffset(Module::VT_IVALUES, ivalues); + } + void add_storage_data_size(int32_t storage_data_size) { + fbb_.AddElement(Module::VT_STORAGE_DATA_SIZE, storage_data_size, 0); + } + void add_storage_data(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> storage_data) { + fbb_.AddOffset(Module::VT_STORAGE_DATA, storage_data); + } + void add_object_types(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> object_types) { + fbb_.AddOffset(Module::VT_OBJECT_TYPES, 
object_types); + } + void add_jit_sources(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> jit_sources) { + fbb_.AddOffset(Module::VT_JIT_SOURCES, jit_sources); + } + void add_jit_constants(::flatbuffers::Offset<::flatbuffers::Vector> jit_constants) { + fbb_.AddOffset(Module::VT_JIT_CONSTANTS, jit_constants); + } + void add_operator_version(uint32_t operator_version) { + fbb_.AddElement(Module::VT_OPERATOR_VERSION, operator_version, 0); + } + void add_mobile_ivalue_size(uint32_t mobile_ivalue_size) { + fbb_.AddElement(Module::VT_MOBILE_IVALUE_SIZE, mobile_ivalue_size, 0); + } + explicit ModuleBuilder(::flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + ::flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = ::flatbuffers::Offset(end); + return o; + } +}; + +inline ::flatbuffers::Offset CreateModule( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t bytecode_version = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> extra_files = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> methods = 0, + uint32_t state_obj = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> ivalues = 0, + int32_t storage_data_size = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> storage_data = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> object_types = 0, + ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset>> jit_sources = 0, + ::flatbuffers::Offset<::flatbuffers::Vector> jit_constants = 0, + uint32_t operator_version = 0, + uint32_t mobile_ivalue_size = 0) { + ModuleBuilder builder_(_fbb); + builder_.add_mobile_ivalue_size(mobile_ivalue_size); + builder_.add_operator_version(operator_version); + builder_.add_jit_constants(jit_constants); + builder_.add_jit_sources(jit_sources); + builder_.add_object_types(object_types); + builder_.add_storage_data(storage_data); + 
builder_.add_storage_data_size(storage_data_size); + builder_.add_ivalues(ivalues); + builder_.add_state_obj(state_obj); + builder_.add_methods(methods); + builder_.add_extra_files(extra_files); + builder_.add_bytecode_version(bytecode_version); + return builder_.Finish(); +} + +inline ::flatbuffers::Offset CreateModuleDirect( + ::flatbuffers::FlatBufferBuilder &_fbb, + uint32_t bytecode_version = 0, + const std::vector<::flatbuffers::Offset> *extra_files = nullptr, + const std::vector *methods = nullptr, + uint32_t state_obj = 0, + const std::vector<::flatbuffers::Offset> *ivalues = nullptr, + int32_t storage_data_size = 0, + const std::vector<::flatbuffers::Offset> *storage_data = nullptr, + const std::vector<::flatbuffers::Offset> *object_types = nullptr, + const std::vector<::flatbuffers::Offset> *jit_sources = nullptr, + const std::vector *jit_constants = nullptr, + uint32_t operator_version = 0, + uint32_t mobile_ivalue_size = 0) { + auto extra_files__ = extra_files ? _fbb.CreateVector<::flatbuffers::Offset>(*extra_files) : 0; + auto methods__ = methods ? _fbb.CreateVector(*methods) : 0; + auto ivalues__ = ivalues ? _fbb.CreateVector<::flatbuffers::Offset>(*ivalues) : 0; + auto storage_data__ = storage_data ? _fbb.CreateVector<::flatbuffers::Offset>(*storage_data) : 0; + auto object_types__ = object_types ? _fbb.CreateVector<::flatbuffers::Offset>(*object_types) : 0; + auto jit_sources__ = jit_sources ? _fbb.CreateVector<::flatbuffers::Offset>(*jit_sources) : 0; + auto jit_constants__ = jit_constants ? 
_fbb.CreateVector(*jit_constants) : 0; + return torch::jit::mobile::serialization::CreateModule( + _fbb, + bytecode_version, + extra_files__, + methods__, + state_obj, + ivalues__, + storage_data_size, + storage_data__, + object_types__, + jit_sources__, + jit_constants__, + operator_version, + mobile_ivalue_size); +} + +inline bool VerifyIValueUnion(::flatbuffers::Verifier &verifier, const void *obj, IValueUnion type) { + switch (type) { + case IValueUnion::NONE: { + return true; + } + case IValueUnion::Int: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::Bool: { + return verifier.VerifyField(static_cast(obj), 0, 1); + } + case IValueUnion::Double: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::ComplexDouble: { + return verifier.VerifyField(static_cast(obj), 0, 8); + } + case IValueUnion::TensorMetadata: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::String: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::List: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Tuple: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Dict: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Object: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::IntList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::DoubleList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::BoolList: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::Device: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case IValueUnion::EnumValue: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case IValueUnion::Function: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifyIValueUnionVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset> *values, const ::flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyIValueUnion( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline const torch::jit::mobile::serialization::Module *GetModule(const void *buf) { + return ::flatbuffers::GetRoot(buf); +} + +inline const torch::jit::mobile::serialization::Module *GetSizePrefixedModule(const void *buf) { + return ::flatbuffers::GetSizePrefixedRoot(buf); +} + +inline Module *GetMutableModule(void *buf) { + return ::flatbuffers::GetMutableRoot(buf); +} + +inline torch::jit::mobile::serialization::Module *GetMutableSizePrefixedModule(void *buf) { + return ::flatbuffers::GetMutableSizePrefixedRoot(buf); +} + +inline const char *ModuleIdentifier() { + return "PTMF"; +} + +inline bool ModuleBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, ModuleIdentifier()); +} + +inline bool SizePrefixedModuleBufferHasIdentifier(const void *buf) { + return ::flatbuffers::BufferHasIdentifier( + buf, ModuleIdentifier(), true); +} + +inline bool VerifyModuleBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifyBuffer(ModuleIdentifier()); +} + +inline bool VerifySizePrefixedModuleBuffer( + ::flatbuffers::Verifier &verifier) { + return verifier.VerifySizePrefixedBuffer(ModuleIdentifier()); +} + +inline void FinishModuleBuffer( + ::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.Finish(root, ModuleIdentifier()); +} + +inline void FinishSizePrefixedModuleBuffer( + 
::flatbuffers::FlatBufferBuilder &fbb, + ::flatbuffers::Offset root) { + fbb.FinishSizePrefixed(root, ModuleIdentifier()); +} + +} // namespace serialization +} // namespace mobile +} // namespace jit +} // namespace torch + +#endif // FLATBUFFERS_GENERATED_MOBILEBYTECODE_TORCH_JIT_MOBILE_SERIALIZATION_H_ diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h new file mode 100644 index 0000000000000000000000000000000000000000..813064d4fabb927b9125b791d892d82483c53845 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/onnx.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::string prettyPrint(const ::ONNX_NAMESPACE::ModelProto& model); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h new file mode 100644 index 0000000000000000000000000000000000000000..a546867e744238331ce692dac99b416733f1a53e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickle.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +/// Pickle an IValue by calling a function to handle writing the data. +/// +/// `writer` is a function that takes in a pointer to a chunk of memory and its +/// size and consumes it. +/// +/// See `jit::pickle` for more details. 
+TORCH_API void pickle( + std::function writer, + const IValue& ivalue, + std::vector* tensor_table = nullptr); + +/// Save a `torch::IValue` in a format compatible with Python's `pickle` module +/// +/// If present, `tensor_table` is a pointer to a table in which tensors that +/// are contained within `ivalue` are stored, and the bytes returned by the +/// pickler will only include references to these tensors in the table. This can +/// be used to keep the binary blob size small. +/// If not provided, tensors are stored in the same byte stream as the pickle +/// data, similar to `torch.save()` in eager Python. +/// +/// Pickled values can be loaded in Python and C++: +/// \rst +/// .. code-block:: cpp +/// +/// torch::IValue float_value(2.3); +/// +/// // TODO: when tensors are stored in the pickle, delete this +/// std::vector tensor_table; +/// auto data = torch::jit::pickle(float_value, &tensor_table); +/// +/// std::vector ivalues = +/// torch::jit::unpickle(data.data(), data.size()); +/// +/// .. code-block:: python +/// +/// values = torch.load('data.pkl') +/// print(values) +/// +/// \endrst +TORCH_API std::vector pickle( + const IValue& ivalue, + std::vector* tensor_table = nullptr); + +/// Save a `torch::IValue` in a format that can be loaded by both +/// `torch::pickle_load` in C++ and `torch.load` in Python. +TORCH_API std::vector pickle_save(const IValue& ivalue); + +/// Deserialize a `torch::IValue` from bytes produced by either +/// `torch::pickle_save` in C++ or `torch.save` in Python +TORCH_API IValue pickle_load(const std::vector& data); + +/// `reader` is a function that takes in a size to read from some pickled +/// binary. `reader` should remember where it last read, and return +/// the number of bytes read. +/// See `torch::pickle` for details. 
+/// type_resolver is used to resolve any JIT type based on type str +TORCH_API IValue unpickle( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser, + ObjLoader obj_loader = nullptr); + +/// Decode a chunk of memory containing pickled data into its `torch::IValue`s. +/// +/// If any `torch::IValue`s in the pickled data are `Object`s, then a +/// `class_resolver` function must be provided. +/// +/// See `torch::pickle` for details. +TORCH_API IValue unpickle( + const char* data, + size_t size, + TypeResolver type_resolver = nullptr, + c10::ArrayRef tensor_table = {}, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser); + +/// Decode a chunk of memory containing pickled data into its `torch::IValue`s. +/// +/// If any `torch::IValue`s in the pickled data are `Object`s, then a +/// `class_resolver` function must be provided. +/// +/// See `torch::pickle` for details. 
+TORCH_API IValue unpickle( + const char* data, + size_t size, + ObjLoader obj_loader, + TypeResolver type_resolver = nullptr, + c10::ArrayRef tensor_table = {}, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h new file mode 100644 index 0000000000000000000000000000000000000000..4f553b6f7ca8a912eeb8a9d1153d5a3c0bad220d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h @@ -0,0 +1,429 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// See Python's pickletools.py for a detailed description of each of these codes +enum class PickleOpCode : char { + MARK = '(', + STOP = '.', + POP = '0', + POP_MARK = '1', + DUP = '2', + FLOAT = 'F', + INT = 'I', + BININT = 'J', + BININT1 = 'K', + LONG = 'L', + BININT2 = 'M', + NONE = 'N', + PERSID = 'P', + BINPERSID = 'Q', + REDUCE = 'R', + STRING = 'S', + BINSTRING = 'T', + SHORT_BINSTRING = 'U', + // NB: Avoid using UNICODE as it is a macro in the Windows API + UNICODE_ = 'V', + BINUNICODE = 'X', + APPEND = 'a', + BUILD = 'b', + GLOBAL = 'c', + DICT = 'd', + EMPTY_DICT = '}', + APPENDS = 'e', + GET = 'g', + BINGET = 'h', + INST = 'i', + LONG_BINGET = 'j', + LIST = 'l', + EMPTY_LIST = ']', + OBJ = 'o', + PUT = 'p', + BINPUT = 'q', + LONG_BINPUT = 'r', + SETITEM = 's', + TUPLE = 't', + EMPTY_TUPLE = ')', + SETITEMS = 'u', + BINFLOAT = 'G', + + // Protocol 2 + PROTO = char('\x80'), + NEWOBJ = '\x81', + EXT1 = '\x82', + EXT2 = '\x83', + EXT4 = '\x84', + TUPLE1 = '\x85', + TUPLE2 = '\x86', + TUPLE3 = '\x87', + NEWTRUE = '\x88', + NEWFALSE = '\x89', + LONG1 = '\x8a', + 
LONG4 = '\x8b', + + // Protocol 3 (Python 3.x) + BINBYTES = 'B', + SHORT_BINBYTES = 'C', + + // Protocol 4 + SHORT_BINUNICODE = char('\x8c'), + BINUNICODE8 = '\x8d', + BINBYTES8 = '\x8e', + EMPTY_SET = '\x8f', + ADDITEMS = '\x90', + FROZENSET = '\x91', + NEWOBJ_EX = '\x92', + STACK_GLOBAL = '\x93', + MEMOIZE = '\x94', + FRAME = '\x95' +}; + +using ::c10::IValue; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct WriteableTensorData { + const char* data() const { + return static_cast(tensor_.storage().data()); + } + size_t sizeInBytes() const { + return size_; + } + size_t nbytes() const { + return tensor_.storage().nbytes(); + } + bool storageHasDeleter() const { + return tensor_.storage().data_ptr().get_context() != nullptr; + } + + private: + friend TORCH_API WriteableTensorData + getWriteableTensorData(const at::Tensor& tensor, bool to_cpu); + at::Tensor tensor_; + uint64_t size_; +}; + +void setTypeTags(bool state); +bool getTypeTags(); + +class TORCH_API Pickler { + AT_DISALLOW_COPY_AND_ASSIGN(Pickler); + + public: + Pickler(std::function writer) + : Pickler(std::move(writer), nullptr, nullptr, nullptr) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Pickler( + std::function writer, + std::vector* tensor_table, + std::function type_renamer, + std::vector* memoized_class_types, + std::function get_tensor_id = nullptr, + bool tag_aggregates = true) + : writer_(std::move(writer)), + tensor_table_(tensor_table), + type_renamer_(std::move(type_renamer)), + memoized_class_types_(memoized_class_types), + get_tensor_id_(std::move(get_tensor_id)), + tag_aggregates_(tag_aggregates) {} + // NOLINTNEXTLINE(bugprone-exception-escape) + ~Pickler(); + + // Push protocol onto the stack + void protocol(); + + // Push STOP PickleOpCode onto the stack + void stop(); + + void pushIValue(const IValue& ivalue); + + void startTuple(); + void endTuple(); + + const std::vector& tensorData() { + return tensor_data_; + } + + void pushEmptyDict(); + 
void pushDict(const IValue& ivalue); + void pushInt(int64_t value); + void pushLong(const std::string& data); + + private: + void pushIValueImpl(const IValue& ivalue); + void startTypeTag(); + void endTypeTag(const IValue& value); + void pushBool(bool value); + void pushDouble(double value); + void pushComplexDouble(const IValue& value); + void pushGenericList(const IValue& ivalue); + void pushIntList(const IValue& ivalue); + void pushList(const IValue& ivalue); + void pushTensor(const IValue& ivalue); + void pushTensorReference(const IValue& ivalue); + void pushLiteralTensor(const IValue& ivalue); + void pushLiteralSparseTensor(const at::Tensor& tensor); + void pushTuple(const IValue& ivalue); + void pushString(const std::string& string); + void pushDevice(const IValue& ivalue); +#ifdef USE_DISTRIBUTED + void pushRRef(const IValue& ivalue); +#endif + // unmemoized version + void pushStringImpl(const std::string& string); + void pushStorageOfTensor(const at::Tensor& tensor); + + void pushBinGet(uint32_t memo_id); + void pushSpecializedList( + const IValue& ivalue, + const char* list_name, + const std::function& item_pusher); + void pushGlobal(c10::string_view module_name, c10::string_view class_name); + // raw string data is appended directly to the byte stream + void pushBytes(const std::string& string); + void pushTensorData(const at::Tensor& tensor); + + // Add a BINPUT op and return the memoization id used + size_t pushNextBinPut(); + + const void* getPointer(const IValue& ivalue); + + // Caller checks that bufferPos_ > 0 + void flushNonEmpty() { + writer_(buffer_.data(), bufferPos_); + bufferPos_ = 0; + } + + void flush() { + if (bufferPos_ != 0) { + flushNonEmpty(); + } + } + + // These convert values to bytes and add them to the stack (NB: since T is to + // the left of a '::', its type cannot be deduced by the compiler so one must + // explicitly instantiate the template, i.e. 
push(int) works, push(int) + // does not) + static CONSTEXPR_EXCEPT_WIN_CUDA size_t kBufferSize = 256; + template + void push(typename std::common_type::type value) { + const char* begin = reinterpret_cast(&value); + if (bufferPos_ + sizeof(T) > buffer_.size()) { + flushNonEmpty(); + } + static_assert(sizeof(T) <= kBufferSize, "Buffer size assumption"); + memcpy(buffer_.data() + bufferPos_, begin, sizeof(T)); + bufferPos_ += sizeof(T); + } + + // Stream to write binary data to + // Code shouldn't call writer_ directly without first flush()ing. + std::function writer_; + + // Buffer to avoid calling a writer_ on a per-byte basis. + std::array buffer_; + size_t bufferPos_{0}; + + // Stack of opcodes/data + std::vector stack_; + + // External table of tensors to serialize. If this is missing, then tensors + // are serialized directly into the pickle + std::vector* tensor_table_; + + // TODO: only use this if necessary (add a pass to find all shared ivalues, + // and only memoize those) + uint32_t memo_id_ = 0; + + // Memoization of IValues that have been written (index in table is used for + // BINPUT opcodes) to enable shared references + c10::FastMap memoized_ivalue_map_; + + // because we de-dup ivalues based on their raw pointer address in the above + // map we need to keep all the memoized values alive during the pickle. + // Otherwise, it is possible that a raw address gets reused for another + // object, and we will alias it to the old object at that address. + std::vector memoized_ivalues_; + + std::function type_renamer_; + + // List of all the types that it wrote, inspect from the IValues it wrote. 
+ std::vector* memoized_class_types_; + + // Function to grab next id_name for tensor storage, function is responsible + // for returning unique ids + std::function get_tensor_id_; + + // List of tensor storages to serialize in the same binary as the pickle data + // similar to ivalues, they are memoized using BINPUT + std::vector tensor_data_; + c10::FastMap memoized_storage_map_; + + c10::FastMap memoized_globals_map_; + c10::FastMap memoized_strings_map_; + c10::FastMap memoized_devices_map_; + // when true, List and Dict objects will be wrapped in a + // torch.jit._pickle.restore_type_tag call to correctly set the dynamic + // TorchScript type for the object. When true the thing unpickling must have + // torch installed. + bool tag_aggregates_; +}; + +// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor +// if it was CUDA and to_cpu is True. +TORCH_API WriteableTensorData +getWriteableTensorData(const at::Tensor& tensor, bool to_cpu = true); + +// return the value of the tensor's storage pointer +uint64_t getStorageKey(const at::Tensor& tensor); + +// if the cls has __getstate__/__setstate__ +// assert they have the right schema and return true, +// otherwise return false +bool checkHasValidSetGetState(const std::shared_ptr& cls); + +// Declare BackendMeta serialization and deserialization function pointer types. +using BackendMetaPtr = std::function< + void(const at::Tensor&, std::unordered_map&)>; + +// A allowlist of device type, currently available is PrivateUse1 +inline std::unordered_set& GetBackendMetaAllowlist() { + static std::unordered_set DeviceTypeAllowlist{ + c10::DeviceType::PrivateUse1}; + return DeviceTypeAllowlist; +} + +// Dynamically obtain serialization function pairs +// that require the corresponding backend. +inline std::array< + c10::optional>, + at::COMPILE_TIME_MAX_DEVICE_TYPES>& +GetBackendMetaSerialization() { + // The array to save function pointer for BackendMeta serialization. 
+ // key is the DeviceType, value is std::pair obj. + // value.first represent get function and value.seconde represent set function + static std::array< + c10::optional>, + at::COMPILE_TIME_MAX_DEVICE_TYPES> + BackendMetaSerialization; + return BackendMetaSerialization; +} + +// Register function pointer of Tensor BackendMetadata for serialization. +TORCH_API inline void TensorBackendMetaRegistry( + c10::DeviceType t, + const BackendMetaPtr& get_fptr, + const BackendMetaPtr& set_fptr) { + // allowlist verification + // Only if the devicetype is in the allowlist, + // we allow the serialization extension to be registered for backendmeta data. + const auto& DeviceTypeAllowlist = GetBackendMetaAllowlist(); + TORCH_CHECK( + DeviceTypeAllowlist.find(t) != DeviceTypeAllowlist.end(), + "It is not allowed to register the serialization method ", + "of backendMeta data for PrivateUse1. ", + "If you have related serialization requirements, ", + "please expand the allowlist"); + // Register function pointer + int device_type = static_cast(t); + auto& BackendMetaSerialization = GetBackendMetaSerialization(); + TORCH_CHECK( + !BackendMetaSerialization[device_type].has_value(), + "The tensor BackendMeta serialization function pointer for ", + t, + " has been registered."); + BackendMetaSerialization[device_type] = + c10::optional>( + std::make_pair(get_fptr, set_fptr)); +} + +// Return a map of Tensor Metadata which including BackendMetaData for +// serialization. For now, it only takes care of `conj` and `neg` bit. +inline std::unordered_map getTensorMetadata( + const at::Tensor& t) { + // We don't support serializing `ZeroTensor` as it is not public + // facing yet. + TORCH_CHECK( + !t._is_zerotensor(), + "ZeroTensor is not serializable,", + " please file an issue if required."); + std::unordered_map metadata{}; + + // Only add meta-data if the value is not default. 
+ if (t.is_conj()) { + metadata["conj"] = true; + } + if (t.is_neg()) { + metadata["neg"] = true; + } + // Only add BackendMetaData for custom backend if the function pointer is + // registered. + int device_type = static_cast(t.device().type()); + const auto& BackendMetaSerialization = GetBackendMetaSerialization(); + if (BackendMetaSerialization[device_type].has_value()) { + // Pass the tensor and metadata map references as parameters to the custom + // serialization function. + BackendMetaPtr fptr = BackendMetaSerialization[device_type].value().first; + fptr(t, metadata); + } + return metadata; +} + +// set Tensor Metadata based on the map. +// Refer: getTensorMetadata +inline void setTensorMetadata( + const at::Tensor& t, + std::unordered_map metadata) { + auto iter_end = metadata.end(); + auto iter_temp = metadata.find("conj"); + if (iter_temp != iter_end) { + t._set_conj(true); + metadata.erase(iter_temp); + } + iter_temp = metadata.find("neg"); + if (iter_temp != iter_end) { + t._set_neg(true); + metadata.erase(iter_temp); + } + // Only set BackendMetaData for custom backend if the function pointer is + // registered. + int device_type = static_cast(t.device().type()); + const auto& BackendMetaSerialization = GetBackendMetaSerialization(); + if (BackendMetaSerialization[device_type].has_value()) { + // Pass the tensor and metadata map references as parameters to the custom + // deserialization function. + BackendMetaPtr fptr = BackendMetaSerialization[device_type].value().second; + fptr(t, metadata); + } +} + +// set Tensor metadata based on the map. 
+// NOTE: This overload is required by unpickler.cpp +inline void setTensorMetadata( + const at::Tensor& t, + const c10::Dict& metadata_idict) { + std::unordered_map metadata; + for (auto& pair : metadata_idict) { + auto key = *pair.key().toString(); + metadata[key] = pair.value().toBool(); + } + setTensorMetadata(t, std::move(metadata)); +} + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h new file mode 100644 index 0000000000000000000000000000000000000000..ede364ec02f81e4e4652fb659c801b62bbc8a99e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h @@ -0,0 +1,58 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct Method; +struct Module; +struct PythonPrintImpl; + +struct PrintDepsTable { + void add(const c10::NamedTypePtr& type); + + size_t size() const { + return table_.size(); + } + + const c10::NamedTypePtr& operator[](size_t index) const { + return table_[index]; + } + + private: + std::vector table_; + std::unordered_set non_unique_; +}; + +struct TORCH_API PythonPrint { + PythonPrint( + std::vector& constant_table, + PrintDepsTable& deps_table, + c10::TypePrinter type_printer = nullptr, + bool enforce_importable = false); + + void printNamedType(const c10::NamedTypePtr& classType); + void printFunction(const Function& callee); + void printMethod(const Function& callee); + + std::string str() const; + const SourceRangeRecords& ranges() const; + uint64_t minVersion() const; + + private: + std::shared_ptr pImpl; +}; + +TORCH_API bool printerHasSpecialCaseFor(c10::Symbol sym); + +TORCH_API void jitModuleToPythonCodeAndConstants( + const Module& module, + ExtraFilesMap* jit_sources, // output + std::vector* constants // output +); + +} // 
namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..bbfd533cd17891d415cd10681a1816fc7a9d78a1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include + +#include + +#include +#include + +namespace c10 { +struct IValue; +} + +namespace torch { +namespace jit { + +class Pickler; +class SourceRangeSerializer; +static constexpr size_t kByteOffsetIndex = 0; +static constexpr size_t kSourceRangeIndex = 1; +static constexpr size_t kSourceRangeTagIndex = 2; +constexpr c10::string_view kFormatWithStringTable = "FORMAT_WITH_STRING_TABLE"; + +class SourceRangePickler { + public: + SourceRangePickler(); + + std::vector pickle( + const SourceRangeRecords& ranges, + const SourceRangeTagMap& source_range_tags); + + private: + std::shared_ptr srs; +}; + +class SourceRangeDeserializer { + public: + SourceRangeDeserializer() = default; + explicit SourceRangeDeserializer(const c10::IValue& text_table) { + for (const auto& x : text_table.toTuple()->elements()) { + text_table_.emplace_back(std::make_shared(x.toStringRef())); + } + } + SourceRange deserialize(const c10::IValue& iv); + + private: + std::shared_ptr deserialize_source(const c10::IValue& iv); + std::unordered_map< + c10::intrusive_ptr, + std::shared_ptr> + cached_sources; + std::vector> text_table_; +}; + +class SourceRangeUnpickler { + public: + virtual c10::optional findSourceRangeThatGenerated( + const SourceRange& range) = 0; + + virtual ~SourceRangeUnpickler() = default; +}; + +TORCH_API void setShouldUseFormatWithStringTable( + bool should_use_format_with_string_table); + +} // namespace jit +} // 
namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h new file mode 100644 index 0000000000000000000000000000000000000000..d065fc003cf4f4f5e7314e829f06b4c350b2143b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h @@ -0,0 +1,85 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Used in torch.package and TorchScript serialization to coordinate +// sharing of storages between models. Also used to create deterministic +// naming for storages. +class TORCH_API SerializationStorageContext { + public: + explicit SerializationStorageContext() = default; + SerializationStorageContext operator=(const SerializationStorageContext&) = + delete; + SerializationStorageContext(const SerializationStorageContext&) = delete; + + uint64_t getOrAddStorage(const c10::Storage& storage) { + if (!hasStorage(storage)) { + uint64_t size = storage_id_map_.size(); + storage_id_map_[storage] = size; + } + return storage_id_map_[storage]; + } + + bool hasStorage(const c10::Storage& storage) { + return storage_id_map_.find(storage) != storage_id_map_.end(); + } + + ~SerializationStorageContext() = default; + + private: + class StorageSerializationHash { + public: + size_t operator()(const c10::Storage& storage) const { + return std::hash()( + reinterpret_cast(storage.unsafeGetStorageImpl())); + } + }; + + class StorageSerializationEqual { + public: + bool operator()(const c10::Storage& lhs, const c10::Storage& rhs) const { + return lhs.unsafeGetStorageImpl() == rhs.unsafeGetStorageImpl(); + } + }; + + std::unordered_map< + c10::Storage, + uint64_t, + StorageSerializationHash, + StorageSerializationEqual> + storage_id_map_; +}; + +// Used in torch.package and TorchScript deserialization to coordinate +// sharing of storages 
between models. +class TORCH_API DeserializationStorageContext { + public: + explicit DeserializationStorageContext() = default; + DeserializationStorageContext operator=( + const DeserializationStorageContext&) = delete; + DeserializationStorageContext(const DeserializationStorageContext&) = delete; + + void addStorage(std::string name, c10::Storage storage) { + TORCH_INTERNAL_ASSERT(!hasStorage(name)); + name_storage_map_.emplace(std::move(name), std::move(storage)); + } + + bool hasStorage(const std::string& name) { + return name_storage_map_.find(name) != name_storage_map_.end(); + } + + c10::Storage getStorage(const std::string& name) { + TORCH_INTERNAL_ASSERT(hasStorage(name)); + return name_storage_map_.find(name)->second; + } + ~DeserializationStorageContext() = default; + + private: + std::unordered_map name_storage_map_; +}; + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h new file mode 100644 index 0000000000000000000000000000000000000000..addedc5492cff19673d777887185cc894bf77321 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +/** + * class TypeNameUniquer + * + * Generates a unique name for every type `t` passed in. Types that compare + * equal with EqualType will receive the same unique name. + * + * This is used during Module::save(), to resolve type name collisions during + * serialization. 
+ */ +class TORCH_API TypeNameUniquer { + public: + c10::QualifiedName getUniqueName(c10::ConstNamedTypePtr t); + + private: + NameMangler mangler_; + std::unordered_set used_names_; + std::unordered_map< + c10::ConstNamedTypePtr, + c10::QualifiedName, + HashType, + EqualType> + name_map_; +}; +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h new file mode 100644 index 0000000000000000000000000000000000000000..bc980bf90522b5c2abec5d99cc3a06a2f66082e1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h @@ -0,0 +1,203 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using TypeResolver = + std::function; + +using ObjLoader = std::function< + c10::intrusive_ptr(const at::StrongTypePtr&, IValue)>; + +class DeserializationStorageContext; + +// [unpickler refactor] there is some cruft around PickleOpCode::BUILD, +// PickleOpCode::NEWOBJ, and the last_opcode_ member below that should be +// deleted at some point, the Pickler doesn't produce it and it's only around to +// support models saved before 1.1 +class TORCH_API Unpickler { + AT_DISALLOW_COPY_AND_ASSIGN(Unpickler); + + using TypeParserT = c10::TypePtr (*)(const std::string&); + + public: + // tensors inside the pickle are references to the tensor_table. + // class_resolver is to resolve strong class type, type_resolver_ is + // to resolve any JIT type. class_resolver and type_resolver are not merged + // here because some use cases need to get strong class type that + // type_resolver_ can not return. 
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Unpickler( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + TypeParserT type_parser = defaultTypeParser) + : reader_(std::move(reader)), + tensor_table_(tensor_table), + type_resolver_(std::move(type_resolver)), + use_storage_device_(false), + type_parser_(type_parser), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + Unpickler( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + ObjLoader obj_loader, + TypeParserT type_parser = defaultTypeParser) + : reader_(std::move(reader)), + tensor_table_(tensor_table), + type_resolver_(std::move(type_resolver)), + obj_loader_(std::move(obj_loader)), + use_storage_device_(false), + type_parser_(type_parser), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + // tensors inside the pickle contain meta-data, the raw tensor + // dead is retrieved by calling `read_record`. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Unpickler( + std::function reader, + TypeResolver type_resolver, + ObjLoader obj_loader, + std::function read_record, + c10::optional device, + bool use_storage_device = false, + TypeParserT type_parser = defaultTypeParser, + std::shared_ptr storage_context = nullptr) + : reader_(std::move(reader)), + tensor_table_(), + type_resolver_(std::move(type_resolver)), + obj_loader_(std::move(obj_loader)), + read_record_(std::move(read_record)), + // NOLINTNEXTLINE(performance-move-const-arg) + device_(std::move(device)), + use_storage_device_(use_storage_device), + type_parser_(type_parser), + storage_context_(std::move(storage_context)), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + // consume the pickle stream, producing an IValue from the contents. + // Type Tags: the pickler will restore the type tags on + // List and Dict objects when possible IValue is an Object. 
+ // Otherwise, Dict and List objects will end up with Any as their tag. + // If you know the type of the ivalue, tags can be restored with + // restoreAccurateTypeTags + IValue parse_ivalue(); + + // [type tag serialization] + // This is used to determine whether to restore type tags be recursively + // descending into the returned stack object (if version_number <= 2), or + // if version_number >= 3, to use the type strings included in the pickle + // archive for container types. By default this is set to + // `kProducedFileFormatVersion` so unless you're loading a pickle file + // from alongside a corresponding `version` file, you don't need to set + // the version manually. + void set_version(uint64_t version_number) { + version_ = version_number; + } + + static c10::TypePtr defaultTypeParser(const std::string& str) { + ScriptTypeParser parser; + return parser.parseType(str); + } + + private: + // No arguments ensures that a template argument must be specified + // so that the number of bytes read / type read is explicit + template + T read() { + T item; + if (sizeof(T) <= buffer_remaining_) { + // Fast path: entirely from buffer. + memcpy(&item, buffer_.data() + buffer_pos_, sizeof(T)); + buffer_remaining_ -= sizeof(T); + buffer_pos_ += sizeof(T); + } else { + // Don't over-template the slow path, to avoid code size bloat. 
+ readSlowWithBuffer(reinterpret_cast(&item), sizeof(T)); + } + return item; + } + void readSlowWithBuffer(char* dest, size_t sz); + std::string readBytes(size_t num_bytes); + + double readFloat(); + void readGlobal( + const std::string& module_name, + const std::string& class_name); + void rebuildTensor(bool quantized); + void rebuildTensorFromTypeV2(); + void rebuildSparseTensor(); +#ifdef USE_DISTRIBUTED + void rebuildRRef(); +#endif + PickleOpCode readInstruction(); + PickleOpCode readOpCode() { + return static_cast(read()); + } + std::string readString(); + void readList(IValue list_ivalue); + void readListElements(IValue list_ivalue, size_t start); + void setInput(size_t memo_id); + void run(); + + // Returns the number of bytes read. This should statefully + // remember the position. Don't call reader_ directly. + std::function reader_; + // Small buffer to avoid calling reader_ on a per-byte basis. + std::array buffer_; + size_t buffer_pos_{0}; + size_t buffer_remaining_{0}; + + std::vector stack_; + + // globals are represented on the stack as IValue integer indices + // into this list + std::vector> globals_; + std::vector memo_table_; + std::vector marks_; + c10::ArrayRef tensor_table_; + + // When deserializing types on lists and dicts, cache the type here + // so we don't have to parse the same type multiple times. Strings + // are already de-duplicated and replaced with BINGETs in the + // pickler, so we can just use the actual data pointer of each string. + std::unordered_map type_cache_; + + // optionally nullptr, needs to be present for creating classes + TypeResolver type_resolver_; + ObjLoader obj_loader_; + IValue empty_tuple_; + + std::function read_record_; + c10::optional device_; + // When set to true, Unpickler will ignore the pickled device and use the + // device of the DataPtr returned by the read_record_ function. The default + // value of this flag is false. 
+ const bool use_storage_device_; + + TypeParserT type_parser_{defaultTypeParser}; + + // Used for torch.package to enable sharing of storages across + // ScriptModules and eager modules + std::shared_ptr storage_context_; + + // See [type tag serialization] + uint64_t version_; + + // See [NOTE] skip_next_read_global + uint8_t skip_next_read_global = 0; +}; + +void restoreAccurateTypeTags(const IValue& root, const c10::TypePtr& type_tag); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h new file mode 100644 index 0000000000000000000000000000000000000000..d960b287e20fbc363dbe53bc7676bd6bf28c6759 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h @@ -0,0 +1,227 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __FreeBSD__ +#include +#include +#define thp_bswap16(x) bswap16(x) +#define thp_bswap32(x) bswap32(x) +#define thp_bswap64(x) bswap64(x) +#elif defined(__APPLE__) +#include +#define thp_bswap16(x) OSSwapInt16(x) +#define thp_bswap32(x) OSSwapInt32(x) +#define thp_bswap64(x) OSSwapInt64(x) +#elif defined(__GNUC__) && !defined(__MINGW32__) +#include +#define thp_bswap16(x) bswap_16(x) +#define thp_bswap32(x) bswap_32(x) +#define thp_bswap64(x) bswap_64(x) +#elif defined _WIN32 || defined _WIN64 +#define thp_bswap16(x) _byteswap_ushort(x) +#define thp_bswap32(x) _byteswap_ulong(x) +#define thp_bswap64(x) _byteswap_uint64(x) +#endif + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define to_be16(x) thp_bswap16(x) +#define from_be16(x) thp_bswap16(x) +#define to_be32(x) thp_bswap32(x) +#define from_be32(x) thp_bswap32(x) +#define to_be64(x) thp_bswap64(x) +#define from_be64(x) thp_bswap64(x) +#define to_le16(x) (x) +#define from_le16(x) (x) +#define to_le32(x) (x) 
+#define from_le32(x) (x) +#define to_le64(x) (x) +#define from_le64(x) (x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define to_be16(x) (x) +#define from_be16(x) (x) +#define to_be32(x) (x) +#define from_be32(x) (x) +#define to_be64(x) (x) +#define from_be64(x) (x) +#define to_le16(x) thp_bswap16(x) +#define from_le16(x) thp_bswap16(x) +#define to_le32(x) thp_bswap32(x) +#define from_le32(x) thp_bswap32(x) +#define to_le64(x) thp_bswap64(x) +#define from_le64(x) thp_bswap64(x) +#else +#error Unexpected or undefined __BYTE_ORDER__ +#endif + +namespace torch { +namespace utils { + +enum THPByteOrder { THP_LITTLE_ENDIAN = 0, THP_BIG_ENDIAN = 1 }; + +TORCH_API THPByteOrder THP_nativeByteOrder(); + +TORCH_API void THP_decodeInt16Buffer( + int16_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeInt32Buffer( + int32_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeInt64Buffer( + int64_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeHalfBuffer( + c10::Half* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeFloatBuffer( + float* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeDoubleBuffer( + double* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeBoolBuffer( + bool* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeBFloat16Buffer( + at::BFloat16* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeComplexFloatBuffer( + c10::complex* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeComplexDoubleBuffer( + c10::complex* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); + +TORCH_API void THP_decodeInt16Buffer( + int16_t* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void 
THP_decodeInt32Buffer( + int32_t* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeInt64Buffer( + int64_t* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeHalfBuffer( + c10::Half* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeFloatBuffer( + float* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeDoubleBuffer( + double* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeBoolBuffer( + bool* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeBFloat16Buffer( + at::BFloat16* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeFloat8_e5m2Buffer( + at::Float8_e5m2* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e4m3fnBuffer( + at::Float8_e4m3fn* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e5m2fnuzBuffer( + at::Float8_e5m2fnuz* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e4m3fnuzBuffer( + at::Float8_e4m3fnuz* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeComplexFloatBuffer( + c10::complex* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeComplexDoubleBuffer( + c10::complex* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); + +TORCH_API void THP_encodeInt16Buffer( + uint8_t* dst, + const int16_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeInt32Buffer( + uint8_t* dst, + const int32_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeInt64Buffer( + uint8_t* dst, + const int64_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeFloatBuffer( + uint8_t* dst, + const float* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeDoubleBuffer( + uint8_t* dst, + 
const double* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeComplexFloatBuffer( + uint8_t* dst, + const c10::complex* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeComplexDoubleBuffer( + uint8_t* dst, + const c10::complex* src, + THPByteOrder order, + size_t len); + +} // namespace utils +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h new file mode 100644 index 0000000000000000000000000000000000000000..8fc5118830eb7d7c016ef3a78deb342c24a81c07 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h @@ -0,0 +1,41 @@ +#pragma once +#include +#include +#include + +namespace torch { +// Sometimes we don't want infinite recursion for subclasses, +// Or a way to achieve the old behaviour. + +// This is an internal utility, not exposed to users. 
+bool torch_function_enabled(); +PyObject* disabled_torch_function_impl(); +PyObject* disabled_torch_dispatch_impl(); +void set_disabled_torch_function_impl(PyObject* value); +void set_disabled_torch_dispatch_impl(PyObject* value); +// Set ignore_mode to true if you're trying to collect overloaded arguments; +// using mode here will improperly cause you to add ALL objects to the +// overloaded list even if they don't actually have __torch_function__ +bool check_has_torch_function(PyObject* obj, bool ignore_mode = false); + +struct DisableTorchDispatch { + DisableTorchDispatch() + : guard_(c10::DispatchKey::Python), + guard_tls_snapshot_(c10::DispatchKey::PythonTLSSnapshot) {} + c10::impl::ExcludeDispatchKeyGuard guard_; + c10::impl::ExcludeDispatchKeyGuard guard_tls_snapshot_; +}; + +} // namespace torch + +PyObject* THPModule_isEnabledTorchFunction(PyObject* self, PyObject* unused); +PyObject* THPModule_DisableTorchFunctionType(); +PyObject* THPModule_DisableTorchFunctionSubclassType(); +PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args); +PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args); +PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg); +PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj); +PyObject* THPModule_has_torch_function_variadic( + PyObject*, + PyObject* const* args, + Py_ssize_t nargs); diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h new file mode 100644 index 0000000000000000000000000000000000000000..05072be63ad18fc9e0d350fc6fedf4bf85261b3f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pythoncapi_compat.h @@ -0,0 +1,716 @@ +// Header file providing new C API functions to old Python versions. +// +// File distributed under the Zero Clause BSD (0BSD) license. 
+// Copyright Contributors to the pythoncapi_compat project. +// +// Homepage: +// https://github.com/python/pythoncapi_compat +// +// Latest version: +// https://raw.githubusercontent.com/python/pythoncapi_compat/master/pythoncapi_compat.h +// +// SPDX-License-Identifier: 0BSD + +#ifndef PYTHONCAPI_COMPAT +#define PYTHONCAPI_COMPAT + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "frameobject.h" // PyFrameObject, PyFrame_GetBack() + + +// Compatibility with Visual Studio 2013 and older which don't support +// the inline keyword in C (only in C++): use __inline instead. +#if (defined(_MSC_VER) && _MSC_VER < 1900 \ + && !defined(__cplusplus) && !defined(inline)) +# define PYCAPI_COMPAT_STATIC_INLINE(TYPE) static __inline TYPE +#else +# define PYCAPI_COMPAT_STATIC_INLINE(TYPE) static inline TYPE +#endif + + +#ifndef _Py_CAST +# define _Py_CAST(type, expr) ((type)(expr)) +#endif + +// On C++11 and newer, _Py_NULL is defined as nullptr on C++11, +// otherwise it is defined as NULL. +#ifndef _Py_NULL +# if defined(__cplusplus) && __cplusplus >= 201103 +# define _Py_NULL nullptr +# else +# define _Py_NULL NULL +# endif +#endif + +// Cast argument to PyObject* type. 
+#ifndef _PyObject_CAST +# define _PyObject_CAST(op) _Py_CAST(PyObject*, op) +#endif + + +// bpo-42262 added Py_NewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_NewRef) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +_Py_NewRef(PyObject *obj) +{ + Py_INCREF(obj); + return obj; +} +#define Py_NewRef(obj) _Py_NewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-42262 added Py_XNewRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 && !defined(Py_XNewRef) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +_Py_XNewRef(PyObject *obj) +{ + Py_XINCREF(obj); + return obj; +} +#define Py_XNewRef(obj) _Py_XNewRef(_PyObject_CAST(obj)) +#endif + + +// bpo-39573 added Py_SET_REFCNT() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_REFCNT) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_REFCNT(PyObject *ob, Py_ssize_t refcnt) +{ + ob->ob_refcnt = refcnt; +} +#define Py_SET_REFCNT(ob, refcnt) _Py_SET_REFCNT(_PyObject_CAST(ob), refcnt) +#endif + + +// Py_SETREF() and Py_XSETREF() were added to Python 3.5.2. +// It is excluded from the limited C API. +#if (PY_VERSION_HEX < 0x03050200 && !defined(Py_SETREF)) && !defined(Py_LIMITED_API) +#define Py_SETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_DECREF(_tmp_dst); \ + } while (0) + +#define Py_XSETREF(dst, src) \ + do { \ + PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \ + PyObject *_tmp_dst = (*_tmp_dst_ptr); \ + *_tmp_dst_ptr = _PyObject_CAST(src); \ + Py_XDECREF(_tmp_dst); \ + } while (0) +#endif + + +// bpo-43753 added Py_Is(), Py_IsNone(), Py_IsTrue() and Py_IsFalse() +// to Python 3.10.0b1. 
+#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_Is) +# define Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsNone) +# define Py_IsNone(x) Py_Is(x, Py_None) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsTrue) +# define Py_IsTrue(x) Py_Is(x, Py_True) +#endif +#if PY_VERSION_HEX < 0x030A00B1 && !defined(Py_IsFalse) +# define Py_IsFalse(x) Py_Is(x, Py_False) +#endif + + +// bpo-39573 added Py_SET_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_TYPE) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_TYPE(PyObject *ob, PyTypeObject *type) +{ + ob->ob_type = type; +} +#define Py_SET_TYPE(ob, type) _Py_SET_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-39573 added Py_SET_SIZE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_SET_SIZE) +PYCAPI_COMPAT_STATIC_INLINE(void) +_Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) +{ + ob->ob_size = size; +} +#define Py_SET_SIZE(ob, size) _Py_SET_SIZE((PyVarObject*)(ob), size) +#endif + + +// bpo-40421 added PyFrame_GetCode() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyCodeObject*) +PyFrame_GetCode(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + assert(frame->f_code != _Py_NULL); + return _Py_CAST(PyCodeObject*, Py_NewRef(frame->f_code)); +} +#endif + +PYCAPI_COMPAT_STATIC_INLINE(PyCodeObject*) +_PyFrame_GetCodeBorrow(PyFrameObject *frame) +{ + PyCodeObject *code = PyFrame_GetCode(frame); + Py_DECREF(code); + return code; +} + + +// bpo-40421 added PyFrame_GetBack() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +PyFrame_GetBack(PyFrameObject *frame) +{ + assert(frame != _Py_NULL); + return _Py_CAST(PyFrameObject*, Py_XNewRef(frame->f_back)); +} +#endif + +#if !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +_PyFrame_GetBackBorrow(PyFrameObject *frame) +{ + PyFrameObject *back = 
PyFrame_GetBack(frame); + Py_XDECREF(back); + return back; +} +#endif + + +// bpo-40421 added PyFrame_GetLocals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetLocals(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030400B1 + if (PyFrame_FastToLocalsWithError(frame) < 0) { + return NULL; + } +#else + PyFrame_FastToLocals(frame); +#endif + return Py_NewRef(frame->f_locals); +} +#endif + + +// bpo-40421 added PyFrame_GetGlobals() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetGlobals(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_globals); +} +#endif + + +// bpo-40421 added PyFrame_GetBuiltins() to Python 3.11.0a7 +#if PY_VERSION_HEX < 0x030B00A7 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetBuiltins(PyFrameObject *frame) +{ + return Py_NewRef(frame->f_builtins); +} +#endif + + +// bpo-40421 added PyFrame_GetLasti() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFrame_GetLasti(PyFrameObject *frame) +{ +#if PY_VERSION_HEX >= 0x030A00A7 + // bpo-27129: Since Python 3.10.0a7, f_lasti is an instruction offset, + // not a bytes offset anymore. Python uses 16-bit "wordcode" (2 bytes) + // instructions. 
+ if (frame->f_lasti < 0) { + return -1; + } + return frame->f_lasti * 2; +#else + return frame->f_lasti; +#endif +} +#endif + + +// gh-91248 added PyFrame_GetVar() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetVar(PyFrameObject *frame, PyObject *name) +{ + PyObject *locals, *value; + + locals = PyFrame_GetLocals(frame); + if (locals == NULL) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + value = PyDict_GetItemWithError(locals, name); +#else + value = PyDict_GetItem(locals, name); +#endif + Py_DECREF(locals); + + if (value == NULL) { + if (PyErr_Occurred()) { + return NULL; + } +#if PY_VERSION_HEX >= 0x03000000 + PyErr_Format(PyExc_NameError, "variable %R does not exist", name); +#else + PyErr_SetString(PyExc_NameError, "variable does not exist"); +#endif + return NULL; + } + return Py_NewRef(value); +} +#endif + + +// gh-91248 added PyFrame_GetVarString() to Python 3.12.0a2 +#if PY_VERSION_HEX < 0x030C00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyFrame_GetVarString(PyFrameObject *frame, const char *name) +{ + PyObject *name_obj, *value; + name_obj = PyUnicode_FromString(name); + if (name_obj == NULL) { + return NULL; + } + value = PyFrame_GetVar(frame, name_obj); + Py_DECREF(name_obj); + return value; +} +#endif + + +// bpo-39947 added PyThreadState_GetInterpreter() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyInterpreterState *) +PyThreadState_GetInterpreter(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->interp; +} +#endif + + +// bpo-40429 added PyThreadState_GetFrame() to Python 3.9.0b1 +#if PY_VERSION_HEX < 0x030900B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +PyThreadState_GetFrame(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return _Py_CAST(PyFrameObject *, Py_XNewRef(tstate->frame)); +} +#endif + +#if 
!defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyFrameObject*) +_PyThreadState_GetFrameBorrow(PyThreadState *tstate) +{ + PyFrameObject *frame = PyThreadState_GetFrame(tstate); + Py_XDECREF(frame); + return frame; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 || defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyInterpreterState*) +PyInterpreterState_Get(void) +{ + PyThreadState *tstate; + PyInterpreterState *interp; + + tstate = PyThreadState_GET(); + if (tstate == _Py_NULL) { + Py_FatalError("GIL released (tstate is NULL)"); + } + interp = tstate->interp; + if (interp == _Py_NULL) { + Py_FatalError("no current interpreter"); + } + return interp; +} +#endif + + +// bpo-39947 added PyInterpreterState_Get() to Python 3.9.0a6 +#if 0x030700A1 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(uint64_t) +PyThreadState_GetID(PyThreadState *tstate) +{ + assert(tstate != _Py_NULL); + return tstate->id; +} +#endif + +// bpo-43760 added PyThreadState_EnterTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(void) +PyThreadState_EnterTracing(PyThreadState *tstate) +{ + tstate->tracing++; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = 0; +#else + tstate->use_tracing = 0; +#endif +} +#endif + +// bpo-43760 added PyThreadState_LeaveTracing() to Python 3.11.0a2 +#if PY_VERSION_HEX < 0x030B00A2 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(void) +PyThreadState_LeaveTracing(PyThreadState *tstate) +{ + int use_tracing = (tstate->c_tracefunc != _Py_NULL + || tstate->c_profilefunc != _Py_NULL); + tstate->tracing--; +#if PY_VERSION_HEX >= 0x030A00A1 + tstate->cframe->use_tracing = use_tracing; +#else + tstate->use_tracing = use_tracing; +#endif +} +#endif + + +// bpo-37194 added PyObject_CallNoArgs() to Python 3.9.0a1 +// PyObject_CallNoArgs() added to PyPy 3.9.16-v7.3.11 
+#if !defined(PyObject_CallNoArgs) && PY_VERSION_HEX < 0x030900A1 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallNoArgs(PyObject *func) +{ + return PyObject_CallFunctionObjArgs(func, NULL); +} + +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallMethodNoArgs(PyObject *obj, PyObject *name) +{ + return PyObject_CallMethodObjArgs(obj, name, NULL); +} +#endif + + +// bpo-39245 made PyObject_CallOneArg() public (previously called +// _PyObject_CallOneArg) in Python 3.9.0a4 +// PyObject_CallOneArg() added to PyPy 3.9.16-v7.3.11 +#if !defined(PyObject_CallOneArg) && PY_VERSION_HEX < 0x030900A4 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallOneArg(PyObject *func, PyObject *arg) +{ + return PyObject_CallFunctionObjArgs(func, arg, NULL); +} + +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_CallMethodOneArg(PyObject *obj, PyObject *name, PyObject *arg) +{ + return PyObject_CallMethodObjArgs(obj, name, arg, NULL); +} +#endif + + +// bpo-1635741 added PyModule_AddObjectRef() to Python 3.10.0a3 +#if PY_VERSION_HEX < 0x030A00A3 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyModule_AddObjectRef(PyObject *module, const char *name, PyObject *value) +{ + int res; + Py_XINCREF(value); + res = PyModule_AddObject(module, name, value); + if (res < 0) { + Py_XDECREF(value); + } + return res; +} +#endif + + +// bpo-40024 added PyModule_AddType() to Python 3.9.0a5 +#if PY_VERSION_HEX < 0x030900A5 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyModule_AddType(PyObject *module, PyTypeObject *type) +{ + const char *name, *dot; + + if (PyType_Ready(type) < 0) { + return -1; + } + + // inline _PyType_Name() + name = type->tp_name; + assert(name != _Py_NULL); + dot = strrchr(name, '.'); + if (dot != _Py_NULL) { + name = dot + 1; + } + + return PyModule_AddObjectRef(module, name, _PyObject_CAST(type)); +} +#endif + + +// bpo-40241 added PyObject_GC_IsTracked() to Python 3.9.0a6. +// bpo-4688 added _PyObject_GC_IS_TRACKED() to Python 2.7.0a2. 
+#if PY_VERSION_HEX < 0x030900A6 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyObject_GC_IsTracked(PyObject* obj) +{ + return (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj)); +} +#endif + +// bpo-40241 added PyObject_GC_IsFinalized() to Python 3.9.0a6. +// bpo-18112 added _PyGCHead_FINALIZED() to Python 3.4.0 final. +#if PY_VERSION_HEX < 0x030900A6 && PY_VERSION_HEX >= 0x030400F0 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyObject_GC_IsFinalized(PyObject *obj) +{ + PyGC_Head *gc = _Py_CAST(PyGC_Head*, obj) - 1; + return (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(gc)); +} +#endif + + +// bpo-39573 added Py_IS_TYPE() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 && !defined(Py_IS_TYPE) +PYCAPI_COMPAT_STATIC_INLINE(int) +_Py_IS_TYPE(PyObject *ob, PyTypeObject *type) { + return Py_TYPE(ob) == type; +} +#define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST(ob), type) +#endif + + +// bpo-46906 added PyFloat_Pack2() and PyFloat_Unpack2() to Python 3.11a7. +// bpo-11734 added _PyFloat_Pack2() and _PyFloat_Unpack2() to Python 3.6.0b1. +// Python 3.11a2 moved _PyFloat_Pack2() and _PyFloat_Unpack2() to the internal +// C API: Python 3.11a2-3.11a6 versions are not supported. +#if 0x030600B1 <= PY_VERSION_HEX && PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack2(double x, char *p, int le) +{ return _PyFloat_Pack2(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack2(const char *p, int le) +{ return _PyFloat_Unpack2((const unsigned char *)p, le); } +#endif + + +// bpo-46906 added PyFloat_Pack4(), PyFloat_Pack8(), PyFloat_Unpack4() and +// PyFloat_Unpack8() to Python 3.11a7. +// Python 3.11a2 moved _PyFloat_Pack4(), _PyFloat_Pack8(), _PyFloat_Unpack4() +// and _PyFloat_Unpack8() to the internal C API: Python 3.11a2-3.11a6 versions +// are not supported. 
+#if PY_VERSION_HEX <= 0x030B00A1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack4(double x, char *p, int le) +{ return _PyFloat_Pack4(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(int) +PyFloat_Pack8(double x, char *p, int le) +{ return _PyFloat_Pack8(x, (unsigned char*)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack4(const char *p, int le) +{ return _PyFloat_Unpack4((const unsigned char *)p, le); } + +PYCAPI_COMPAT_STATIC_INLINE(double) +PyFloat_Unpack8(const char *p, int le) +{ return _PyFloat_Unpack8((const unsigned char *)p, le); } +#endif + + +// gh-92154 added PyCode_GetCode() to Python 3.11.0b1 +#if PY_VERSION_HEX < 0x030B00B1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetCode(PyCodeObject *code) +{ + return Py_NewRef(code->co_code); +} +#endif + + +// gh-95008 added PyCode_GetVarnames() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetVarnames(PyCodeObject *code) +{ + return Py_NewRef(code->co_varnames); +} +#endif + +// gh-95008 added PyCode_GetFreevars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetFreevars(PyCodeObject *code) +{ + return Py_NewRef(code->co_freevars); +} +#endif + +// gh-95008 added PyCode_GetCellvars() to Python 3.11.0rc1 +#if PY_VERSION_HEX < 0x030B00C1 && !defined(PYPY_VERSION) +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyCode_GetCellvars(PyCodeObject *code) +{ + return Py_NewRef(code->co_cellvars); +} +#endif + + +// Py_UNUSED() was added to Python 3.4.0b2. 
+#if PY_VERSION_HEX < 0x030400B2 && !defined(Py_UNUSED) +# if defined(__GNUC__) || defined(__clang__) +# define Py_UNUSED(name) _unused_ ## name __attribute__((unused)) +# else +# define Py_UNUSED(name) _unused_ ## name +# endif +#endif + + +// gh-105922 added PyImport_AddModuleRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D00A0 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyImport_AddModuleRef(const char *name) +{ + return Py_XNewRef(PyImport_AddModule(name)); +} +#endif + + +// gh-105927 added PyWeakref_GetRef() to Python 3.13.0a1 +#if PY_VERSION_HEX < 0x030D0000 +PYCAPI_COMPAT_STATIC_INLINE(int) +PyWeakref_GetRef(PyObject *ref, PyObject **pobj) +{ + PyObject *obj; + if (ref != NULL && !PyWeakref_Check(ref)) { + *pobj = NULL; + PyErr_SetString(PyExc_TypeError, "expected a weakref"); + return -1; + } + obj = PyWeakref_GetObject(ref); + if (obj == NULL) { + // SystemError if ref is NULL + *pobj = NULL; + return -1; + } + if (obj == Py_None) { + *pobj = NULL; + return 0; + } + *pobj = Py_NewRef(obj); + return (*pobj != NULL); +} +#endif + + +// bpo-36974 added PY_VECTORCALL_ARGUMENTS_OFFSET to Python 3.8b1 +#ifndef PY_VECTORCALL_ARGUMENTS_OFFSET +# define PY_VECTORCALL_ARGUMENTS_OFFSET (_Py_CAST(size_t, 1) << (8 * sizeof(size_t) - 1)) +#endif + +// bpo-36974 added PyVectorcall_NARGS() to Python 3.8b1 +#if PY_VERSION_HEX < 0x030800B1 +static inline Py_ssize_t +PyVectorcall_NARGS(size_t n) +{ + return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; +} +#endif + + +// gh-105922 added PyObject_Vectorcall() to Python 3.9.0a4 +#if PY_VERSION_HEX < 0x030900A4 +PYCAPI_COMPAT_STATIC_INLINE(PyObject*) +PyObject_Vectorcall(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames) +{ +#if PY_VERSION_HEX >= 0x030800B1 && !defined(PYPY_VERSION) + // bpo-36974 added _PyObject_Vectorcall() to Python 3.8.0b1 + return _PyObject_Vectorcall(callable, args, nargsf, kwnames); +#else + PyObject *posargs = NULL, *kwargs = NULL; + PyObject *res; + Py_ssize_t nposargs, nkwargs, 
i; + + if (nargsf != 0 && args == NULL) { + PyErr_BadInternalCall(); + goto error; + } + if (kwnames != NULL && !PyTuple_Check(kwnames)) { + PyErr_BadInternalCall(); + goto error; + } + + nposargs = (Py_ssize_t)PyVectorcall_NARGS(nargsf); + if (kwnames) { + nkwargs = PyTuple_GET_SIZE(kwnames); + } + else { + nkwargs = 0; + } + + posargs = PyTuple_New(nposargs); + if (posargs == NULL) { + goto error; + } + if (nposargs) { + for (i=0; i < nposargs; i++) { + PyTuple_SET_ITEM(posargs, i, Py_NewRef(*args)); + args++; + } + } + + if (nkwargs) { + kwargs = PyDict_New(); + if (kwargs == NULL) { + goto error; + } + + for (i = 0; i < nkwargs; i++) { + PyObject *key = PyTuple_GET_ITEM(kwnames, i); + PyObject *value = *args; + args++; + if (PyDict_SetItem(kwargs, key, value) < 0) { + goto error; + } + } + } + else { + kwargs = NULL; + } + + res = PyObject_Call(callable, posargs, kwargs); + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return res; + +error: + Py_DECREF(posargs); + Py_XDECREF(kwargs); + return NULL; +#endif +} +#endif + + +#ifdef __cplusplus +} +#endif +#endif // PYTHONCAPI_COMPAT diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h new file mode 100644 index 0000000000000000000000000000000000000000..bd06e0f3e30b482f6521c869e605790618742404 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +namespace torch { +namespace utils { + +const at::Tensor& apply_(const at::Tensor& self, PyObject* fn); +const at::Tensor& map_( + const at::Tensor& self, + const at::Tensor& other_, + PyObject* fn); +const at::Tensor& map2_( + const at::Tensor& self, + const at::Tensor& x_, + const at::Tensor& y_, + PyObject* fn); + +} // namespace utils +} // namespace torch diff --git 
a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h new file mode 100644 index 0000000000000000000000000000000000000000..71e65479047b8a5513e32cfe54dbd117c786160b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h @@ -0,0 +1,11 @@ +#pragma once +#include + +namespace torch { +namespace utils { + +PyObject* getTHPQScheme(at::QScheme qscheme); +void initializeQSchemes(); + +} // namespace utils +} // namespace torch diff --git a/vllm/lib/python3.10/site-packages/dns/__init__.py b/vllm/lib/python3.10/site-packages/dns/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a4249b9e7207639a0932109e5d38e7db0b732fca --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/__init__.py @@ -0,0 +1,70 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +"""dnspython DNS toolkit""" + +__all__ = [ + "asyncbackend", + "asyncquery", + "asyncresolver", + "dnssec", + "dnssecalgs", + "dnssectypes", + "e164", + "edns", + "entropy", + "exception", + "flags", + "immutable", + "inet", + "ipv4", + "ipv6", + "message", + "name", + "namedict", + "node", + "opcode", + "query", + "quic", + "rcode", + "rdata", + "rdataclass", + "rdataset", + "rdatatype", + "renderer", + "resolver", + "reversename", + "rrset", + "serial", + "set", + "tokenizer", + "transaction", + "tsig", + "tsigkeyring", + "ttl", + "rdtypes", + "update", + "version", + "versioned", + "wire", + "xfr", + "zone", + "zonetypes", + "zonefile", +] + +from dns.version import version as __version__ # noqa diff --git a/vllm/lib/python3.10/site-packages/dns/_immutable_ctx.py b/vllm/lib/python3.10/site-packages/dns/_immutable_ctx.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7a33bf3a5f92252a5191b23086fd62e431e785 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/_immutable_ctx.py @@ -0,0 +1,76 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# This implementation of the immutable decorator requires python >= +# 3.7, and is significantly more storage efficient when making classes +# with slots immutable. It's also faster. + +import contextvars +import inspect + +_in__init__ = contextvars.ContextVar("_immutable_in__init__", default=False) + + +class _Immutable: + """Immutable mixin class""" + + # We set slots to the empty list to say "we don't have any attributes". + # We do this so that if we're mixed in with a class with __slots__, we + # don't cause a __dict__ to be added which would waste space. 
+ + __slots__ = () + + def __setattr__(self, name, value): + if _in__init__.get() is not self: + raise TypeError("object doesn't support attribute assignment") + else: + super().__setattr__(name, value) + + def __delattr__(self, name): + if _in__init__.get() is not self: + raise TypeError("object doesn't support attribute assignment") + else: + super().__delattr__(name) + + +def _immutable_init(f): + def nf(*args, **kwargs): + previous = _in__init__.set(args[0]) + try: + # call the actual __init__ + f(*args, **kwargs) + finally: + _in__init__.reset(previous) + + nf.__signature__ = inspect.signature(f) + return nf + + +def immutable(cls): + if _Immutable in cls.__mro__: + # Some ancestor already has the mixin, so just make sure we keep + # following the __init__ protocol. + cls.__init__ = _immutable_init(cls.__init__) + if hasattr(cls, "__setstate__"): + cls.__setstate__ = _immutable_init(cls.__setstate__) + ncls = cls + else: + # Mixin the Immutable class and follow the __init__ protocol. + class ncls(_Immutable, cls): + # We have to do the __slots__ declaration here too! + __slots__ = () + + @_immutable_init + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if hasattr(cls, "__setstate__"): + + @_immutable_init + def __setstate__(self, *args, **kwargs): + super().__setstate__(*args, **kwargs) + + # make ncls have the same name and module as cls + ncls.__name__ = cls.__name__ + ncls.__qualname__ = cls.__qualname__ + ncls.__module__ = cls.__module__ + return ncls diff --git a/vllm/lib/python3.10/site-packages/dns/asyncresolver.py b/vllm/lib/python3.10/site-packages/dns/asyncresolver.py new file mode 100644 index 0000000000000000000000000000000000000000..8f5e062a9ee5c1bf19acf363da7344b8d393e32a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/asyncresolver.py @@ -0,0 +1,475 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2017 Nominum, Inc. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Asynchronous DNS stub resolver.""" + +import socket +import time +from typing import Any, Dict, List, Optional, Union + +import dns._ddr +import dns.asyncbackend +import dns.asyncquery +import dns.exception +import dns.name +import dns.query +import dns.rdataclass +import dns.rdatatype +import dns.resolver # lgtm[py/import-and-import-from] + +# import some resolver symbols for brevity +from dns.resolver import NXDOMAIN, NoAnswer, NoRootSOA, NotAbsolute + +# for indentation purposes below +_udp = dns.asyncquery.udp +_tcp = dns.asyncquery.tcp + + +class Resolver(dns.resolver.BaseResolver): + """Asynchronous DNS stub resolver.""" + + async def resolve( + self, + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, + search: Optional[bool] = None, + backend: Optional[dns.asyncbackend.Backend] = None, + ) -> dns.resolver.Answer: + """Query nameservers asynchronously to find the answer to the question. + + *backend*, a ``dns.asyncbackend.Backend``, or ``None``. 
If ``None``, + the default, then dnspython will use the default backend. + + See :py:func:`dns.resolver.Resolver.resolve()` for the + documentation of the other parameters, exceptions, and return + type of this method. + """ + + resolution = dns.resolver._Resolution( + self, qname, rdtype, rdclass, tcp, raise_on_no_answer, search + ) + if not backend: + backend = dns.asyncbackend.get_default_backend() + start = time.time() + while True: + (request, answer) = resolution.next_request() + # Note we need to say "if answer is not None" and not just + # "if answer" because answer implements __len__, and python + # will call that. We want to return if we have an answer + # object, including in cases where its length is 0. + if answer is not None: + # cache hit! + return answer + assert request is not None # needed for type checking + done = False + while not done: + (nameserver, tcp, backoff) = resolution.next_nameserver() + if backoff: + await backend.sleep(backoff) + timeout = self._compute_timeout(start, lifetime, resolution.errors) + try: + response = await nameserver.async_query( + request, + timeout=timeout, + source=source, + source_port=source_port, + max_size=tcp, + backend=backend, + ) + except Exception as ex: + (_, done) = resolution.query_result(None, ex) + continue + (answer, done) = resolution.query_result(response, None) + # Note we need to say "if answer is not None" and not just + # "if answer" because answer implements __len__, and python + # will call that. We want to return if we have an answer + # object, including in cases where its length is 0. + if answer is not None: + return answer + + async def resolve_address( + self, ipaddr: str, *args: Any, **kwargs: Any + ) -> dns.resolver.Answer: + """Use an asynchronous resolver to run a reverse query for PTR + records. + + This utilizes the resolve() method to perform a PTR lookup on the + specified IP address. + + *ipaddr*, a ``str``, the IPv4 or IPv6 address you want to get + the PTR record for. 
+ + All other arguments that can be passed to the resolve() function + except for rdtype and rdclass are also supported by this + function. + + """ + # We make a modified kwargs for type checking happiness, as otherwise + # we get a legit warning about possibly having rdtype and rdclass + # in the kwargs more than once. + modified_kwargs: Dict[str, Any] = {} + modified_kwargs.update(kwargs) + modified_kwargs["rdtype"] = dns.rdatatype.PTR + modified_kwargs["rdclass"] = dns.rdataclass.IN + return await self.resolve( + dns.reversename.from_address(ipaddr), *args, **modified_kwargs + ) + + async def resolve_name( + self, + name: Union[dns.name.Name, str], + family: int = socket.AF_UNSPEC, + **kwargs: Any, + ) -> dns.resolver.HostAnswers: + """Use an asynchronous resolver to query for address records. + + This utilizes the resolve() method to perform A and/or AAAA lookups on + the specified name. + + *qname*, a ``dns.name.Name`` or ``str``, the name to resolve. + + *family*, an ``int``, the address family. If socket.AF_UNSPEC + (the default), both A and AAAA records will be retrieved. + + All other arguments that can be passed to the resolve() function + except for rdtype and rdclass are also supported by this + function. + """ + # We make a modified kwargs for type checking happiness, as otherwise + # we get a legit warning about possibly having rdtype and rdclass + # in the kwargs more than once. 
+ modified_kwargs: Dict[str, Any] = {} + modified_kwargs.update(kwargs) + modified_kwargs.pop("rdtype", None) + modified_kwargs["rdclass"] = dns.rdataclass.IN + + if family == socket.AF_INET: + v4 = await self.resolve(name, dns.rdatatype.A, **modified_kwargs) + return dns.resolver.HostAnswers.make(v4=v4) + elif family == socket.AF_INET6: + v6 = await self.resolve(name, dns.rdatatype.AAAA, **modified_kwargs) + return dns.resolver.HostAnswers.make(v6=v6) + elif family != socket.AF_UNSPEC: + raise NotImplementedError(f"unknown address family {family}") + + raise_on_no_answer = modified_kwargs.pop("raise_on_no_answer", True) + lifetime = modified_kwargs.pop("lifetime", None) + start = time.time() + v6 = await self.resolve( + name, + dns.rdatatype.AAAA, + raise_on_no_answer=False, + lifetime=self._compute_timeout(start, lifetime), + **modified_kwargs, + ) + # Note that setting name ensures we query the same name + # for A as we did for AAAA. (This is just in case search lists + # are active by default in the resolver configuration and + # we might be talking to a server that says NXDOMAIN when it + # wants to say NOERROR no data. + name = v6.qname + v4 = await self.resolve( + name, + dns.rdatatype.A, + raise_on_no_answer=False, + lifetime=self._compute_timeout(start, lifetime), + **modified_kwargs, + ) + answers = dns.resolver.HostAnswers.make( + v6=v6, v4=v4, add_empty=not raise_on_no_answer + ) + if not answers: + raise NoAnswer(response=v6.response) + return answers + + # pylint: disable=redefined-outer-name + + async def canonical_name(self, name: Union[dns.name.Name, str]) -> dns.name.Name: + """Determine the canonical name of *name*. + + The canonical name is the name the resolver uses for queries + after all CNAME and DNAME renamings have been applied. + + *name*, a ``dns.name.Name`` or ``str``, the query name. + + This method can raise any exception that ``resolve()`` can + raise, other than ``dns.resolver.NoAnswer`` and + ``dns.resolver.NXDOMAIN``. 
+ + Returns a ``dns.name.Name``. + """ + try: + answer = await self.resolve(name, raise_on_no_answer=False) + canonical_name = answer.canonical_name + except dns.resolver.NXDOMAIN as e: + canonical_name = e.canonical_name + return canonical_name + + async def try_ddr(self, lifetime: float = 5.0) -> None: + """Try to update the resolver's nameservers using Discovery of Designated + Resolvers (DDR). If successful, the resolver will subsequently use + DNS-over-HTTPS or DNS-over-TLS for future queries. + + *lifetime*, a float, is the maximum time to spend attempting DDR. The default + is 5 seconds. + + If the SVCB query is successful and results in a non-empty list of nameservers, + then the resolver's nameservers are set to the returned servers in priority + order. + + The current implementation does not use any address hints from the SVCB record, + nor does it resolve addresses for the SCVB target name, rather it assumes that + the bootstrap nameserver will always be one of the addresses and uses it. + A future revision to the code may offer fuller support. The code verifies that + the bootstrap nameserver is in the Subject Alternative Name field of the + TLS certficate. + """ + try: + expiration = time.time() + lifetime + answer = await self.resolve( + dns._ddr._local_resolver_name, "svcb", lifetime=lifetime + ) + timeout = dns.query._remaining(expiration) + nameservers = await dns._ddr._get_nameservers_async(answer, timeout) + if len(nameservers) > 0: + self.nameservers = nameservers + except Exception: + pass + + +default_resolver = None + + +def get_default_resolver() -> Resolver: + """Get the default asynchronous resolver, initializing it if necessary.""" + if default_resolver is None: + reset_default_resolver() + assert default_resolver is not None + return default_resolver + + +def reset_default_resolver() -> None: + """Re-initialize default asynchronous resolver. + + Note that the resolver configuration (i.e. 
/etc/resolv.conf on UNIX + systems) will be re-read immediately. + """ + + global default_resolver + default_resolver = Resolver() + + +async def resolve( + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, + search: Optional[bool] = None, + backend: Optional[dns.asyncbackend.Backend] = None, +) -> dns.resolver.Answer: + """Query nameservers asynchronously to find the answer to the question. + + This is a convenience function that uses the default resolver + object to make the query. + + See :py:func:`dns.asyncresolver.Resolver.resolve` for more + information on the parameters. + """ + + return await get_default_resolver().resolve( + qname, + rdtype, + rdclass, + tcp, + source, + raise_on_no_answer, + source_port, + lifetime, + search, + backend, + ) + + +async def resolve_address( + ipaddr: str, *args: Any, **kwargs: Any +) -> dns.resolver.Answer: + """Use a resolver to run a reverse query for PTR records. + + See :py:func:`dns.asyncresolver.Resolver.resolve_address` for more + information on the parameters. + """ + + return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs) + + +async def resolve_name( + name: Union[dns.name.Name, str], family: int = socket.AF_UNSPEC, **kwargs: Any +) -> dns.resolver.HostAnswers: + """Use a resolver to asynchronously query for address records. + + See :py:func:`dns.asyncresolver.Resolver.resolve_name` for more + information on the parameters. + """ + + return await get_default_resolver().resolve_name(name, family, **kwargs) + + +async def canonical_name(name: Union[dns.name.Name, str]) -> dns.name.Name: + """Determine the canonical name of *name*. 
+
+    See :py:func:`dns.resolver.Resolver.canonical_name` for more
+    information on the parameters and possible exceptions.
+    """
+
+    return await get_default_resolver().canonical_name(name)
+
+
+async def try_ddr(timeout: float = 5.0) -> None:
+    """Try to update the default resolver's nameservers using Discovery of Designated
+    Resolvers (DDR).  If successful, the resolver will subsequently use
+    DNS-over-HTTPS or DNS-over-TLS for future queries.
+
+    See :py:func:`dns.resolver.Resolver.try_ddr` for more information.
+    """
+    return await get_default_resolver().try_ddr(timeout)
+
+
+async def zone_for_name(
+    name: Union[dns.name.Name, str],
+    rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+    tcp: bool = False,
+    resolver: Optional[Resolver] = None,
+    backend: Optional[dns.asyncbackend.Backend] = None,
+) -> dns.name.Name:
+    """Find the name of the zone which contains the specified name.
+
+    See :py:func:`dns.resolver.Resolver.zone_for_name` for more
+    information on the parameters and possible exceptions.
+    """
+
+    if isinstance(name, str):
+        name = dns.name.from_text(name, dns.name.root)
+    if resolver is None:
+        resolver = get_default_resolver()
+    if not name.is_absolute():
+        raise NotAbsolute(name)
+    # Query for an SOA at each name, walking toward the root until one is
+    # owned by the queried name itself.
+    while True:
+        try:
+            answer = await resolver.resolve(
+                name, dns.rdatatype.SOA, rdclass, tcp, backend=backend
+            )
+            assert answer.rrset is not None
+            if answer.rrset.name == name:
+                return name
+            # otherwise we were CNAMEd or DNAMEd and need to look higher
+        except (NXDOMAIN, NoAnswer):
+            pass
+        try:
+            # Move one label closer to the root and retry.
+            name = name.parent()
+        except dns.name.NoParent:  # pragma: no cover
+            raise NoRootSOA
+
+
+async def make_resolver_at(
+    where: Union[dns.name.Name, str],
+    port: int = 53,
+    family: int = socket.AF_UNSPEC,
+    resolver: Optional[Resolver] = None,
+) -> Resolver:
+    """Make a stub resolver using the specified destination as the full resolver.
+
+    *where*, a ``dns.name.Name`` or ``str`` the domain name or IP address of the
+    full resolver.
+
+    *port*, an ``int``, the port to use.  If not specified, the default is 53.
+
+    *family*, an ``int``, the address family to use.  This parameter is used if
+    *where* is not an address.  The default is ``socket.AF_UNSPEC`` in which case
+    the first address returned by ``resolve_name()`` will be used, otherwise the
+    first address of the specified family will be used.
+
+    *resolver*, a ``dns.asyncresolver.Resolver`` or ``None``, the resolver to use for
+    resolution of hostnames.  If not specified, the default resolver will be used.
+
+    Returns a ``dns.resolver.Resolver`` or raises an exception.
+    """
+    if resolver is None:
+        resolver = get_default_resolver()
+    nameservers: List[Union[str, dns.nameserver.Nameserver]] = []
+    if isinstance(where, str) and dns.inet.is_address(where):
+        # *where* is a literal IP address; use it directly.
+        nameservers.append(dns.nameserver.Do53Nameserver(where, port))
+    else:
+        # *where* is a hostname; resolve it and use every returned address.
+        answers = await resolver.resolve_name(where, family)
+        for address in answers.addresses():
+            nameservers.append(dns.nameserver.Do53Nameserver(address, port))
+    res = dns.asyncresolver.Resolver(configure=False)
+    res.nameservers = nameservers
+    return res
+
+
+async def resolve_at(
+    where: Union[dns.name.Name, str],
+    qname: Union[dns.name.Name, str],
+    rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A,
+    rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN,
+    tcp: bool = False,
+    source: Optional[str] = None,
+    raise_on_no_answer: bool = True,
+    source_port: int = 0,
+    lifetime: Optional[float] = None,
+    search: Optional[bool] = None,
+    backend: Optional[dns.asyncbackend.Backend] = None,
+    port: int = 53,
+    family: int = socket.AF_UNSPEC,
+    resolver: Optional[Resolver] = None,
+) -> dns.resolver.Answer:
+    """Query nameservers to find the answer to the question.
+
+    This is a convenience function that calls ``dns.asyncresolver.make_resolver_at()``
+    to make a resolver, and then uses it to resolve the query.
+
+    See ``dns.asyncresolver.Resolver.resolve`` for more information on the resolution
+    parameters, and ``dns.asyncresolver.make_resolver_at`` for information about the
+    resolver parameters *where*, *port*, *family*, and *resolver*.
+
+    If making more than one query, it is more efficient to call
+    ``dns.asyncresolver.make_resolver_at()`` and then use that resolver for the queries
+    instead of calling ``resolve_at()`` multiple times.
+    """
+    res = await make_resolver_at(where, port, family, resolver)
+    return await res.resolve(
+        qname,
+        rdtype,
+        rdclass,
+        tcp,
+        source,
+        raise_on_no_answer,
+        source_port,
+        lifetime,
+        search,
+        backend,
+    )
diff --git a/vllm/lib/python3.10/site-packages/dns/dnssectypes.py b/vllm/lib/python3.10/site-packages/dns/dnssectypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..02131e0adaeb85eb49351f4953c854023315fab9
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/dns/dnssectypes.py
@@ -0,0 +1,71 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related types."""
+
+# This is a separate file to avoid import circularity between dns.dnssec and
+# the implementations of the DS and DNSKEY types.
+
+import dns.enum
+
+
+class Algorithm(dns.enum.IntEnum):
+    """DNSSEC algorithm identifiers."""
+
+    # NOTE(review): the gaps (9, 11) presumably mirror reserved values in the
+    # public DNSSEC algorithm number registry — confirm against IANA.
+    RSAMD5 = 1
+    DH = 2
+    DSA = 3
+    ECC = 4
+    RSASHA1 = 5
+    DSANSEC3SHA1 = 6
+    RSASHA1NSEC3SHA1 = 7
+    RSASHA256 = 8
+    RSASHA512 = 10
+    ECCGOST = 12
+    ECDSAP256SHA256 = 13
+    ECDSAP384SHA384 = 14
+    ED25519 = 15
+    ED448 = 16
+    INDIRECT = 252
+    PRIVATEDNS = 253
+    PRIVATEOID = 254
+
+    @classmethod
+    def _maximum(cls):
+        return 255
+
+
+class DSDigest(dns.enum.IntEnum):
+    """DNSSEC Delegation Signer Digest Algorithm"""
+
+    NULL = 0
+    SHA1 = 1
+    SHA256 = 2
+    GOST = 3
+    SHA384 = 4
+
+    @classmethod
+    def _maximum(cls):
+        return 255
+
+
+class NSEC3Hash(dns.enum.IntEnum):
+    """NSEC3 hash algorithm"""
+
+    SHA1 = 1
+
+    @classmethod
+    def _maximum(cls):
+        return 255
diff --git a/vllm/lib/python3.10/site-packages/dns/entropy.py b/vllm/lib/python3.10/site-packages/dns/entropy.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dcdc6272ca3a670b1616f4c95f2a18b1803bc82
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/dns/entropy.py
@@ -0,0 +1,130 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import hashlib
+import os
+import random
+import threading
+import time
+from typing import Any, Optional
+
+
+class EntropyPool:
+    # This is an entropy pool for Python implementations that do not
+    # have a working SystemRandom.  I'm not sure there are any, but
+    # leaving this code doesn't hurt anything as the library code
+    # is used if present.
+
+    def __init__(self, seed: Optional[bytes] = None):
+        self.pool_index = 0
+        self.digest: Optional[bytearray] = None
+        self.next_byte = 0
+        self.lock = threading.Lock()
+        self.hash = hashlib.sha1()
+        self.hash_len = 20
+        self.pool = bytearray(b"\0" * self.hash_len)
+        if seed is not None:
+            self._stir(seed)
+            self.seeded = True
+            self.seed_pid = os.getpid()
+        else:
+            self.seeded = False
+            self.seed_pid = 0
+
+    def _stir(self, entropy: bytes) -> None:
+        # XOR each entropy byte into the pool, wrapping around the
+        # fixed-size pool buffer.  Not thread-safe; callers hold the lock
+        # or are called before the pool is shared.
+        for c in entropy:
+            if self.pool_index == self.hash_len:
+                self.pool_index = 0
+            b = c & 0xFF
+            self.pool[self.pool_index] ^= b
+            self.pool_index += 1
+
+    def stir(self, entropy: bytes) -> None:
+        # Thread-safe wrapper around _stir().
+        with self.lock:
+            self._stir(entropy)
+
+    def _maybe_seed(self) -> None:
+        # Seed lazily on first use, and re-seed when the pid changes
+        # (i.e. after a fork); fall back from os.urandom() to
+        # /dev/urandom to the current time.
+        if not self.seeded or self.seed_pid != os.getpid():
+            try:
+                seed = os.urandom(16)
+            except Exception:  # pragma: no cover
+                try:
+                    with open("/dev/urandom", "rb", 0) as r:
+                        seed = r.read(16)
+                except Exception:
+                    seed = str(time.time()).encode()
+            self.seeded = True
+            self.seed_pid = os.getpid()
+            self.digest = None
+            seed = bytearray(seed)
+            self._stir(seed)
+
+    def random_8(self) -> int:
+        # Return one pseudo-random byte (0-255).
+        with self.lock:
+            self._maybe_seed()
+            if self.digest is None or self.next_byte == self.hash_len:
+                # Refresh the output buffer by hashing the pool, and fold
+                # the digest back into the pool.
+                self.hash.update(bytes(self.pool))
+                self.digest = bytearray(self.hash.digest())
+                self._stir(self.digest)
+                self.next_byte = 0
+            value = self.digest[self.next_byte]
+            self.next_byte += 1
+        return value
+
+    def random_16(self) -> int:
+        # Compose two 8-bit draws, high byte first.
+        return self.random_8() * 256 + self.random_8()
+
+    def random_32(self) -> int:
+        # Compose two 16-bit draws, high word first.
+        return self.random_16() * 65536 + self.random_16()
+
+    def random_between(self, first: int, last: int) -> int:
+        # Return a pseudo-random integer in the inclusive range
+        # [first, last].
+        # NOTE(review): the scaling below is slightly non-uniform when the
+        # range size does not evenly divide max + 1 — presumably acceptable
+        # here; confirm before relying on it for anything sensitive.
+        size = last - first + 1
+        if size > 4294967296:
+            raise ValueError("too big")
+        if size > 65536:
+            rand = self.random_32
+            max = 4294967295
+        elif size > 256:
+            rand = self.random_16
+            max = 65535
+        else:
+            rand = self.random_8
+            max = 255
+        return first + size * rand() // (max + 1)
+
+
+# Module-level shared pool used by the fallback paths below.
+pool = EntropyPool()
+
+system_random: Optional[Any]
+try:
+    system_random = random.SystemRandom()
+except Exception:  # pragma: no cover
+    system_random = None
+
+
+def random_16() -> int:
+    # Prefer the OS CSPRNG; fall back to the software pool.
+    if system_random is not None:
+        return system_random.randrange(0, 65536)
+    else:
+        return pool.random_16()
+
+
+def between(first: int, last: int) -> int:
+    # Inclusive on both ends.
+    if system_random is not None:
+        return system_random.randrange(first, last + 1)
+    else:
+        return pool.random_between(first, last)
diff --git a/vllm/lib/python3.10/site-packages/dns/namedict.py b/vllm/lib/python3.10/site-packages/dns/namedict.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca8b19789b86a3482110b48d532999bb4cf277b7
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/dns/namedict.py
@@ -0,0 +1,109 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+# Copyright (C) 2016 Coresec Systems AB
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL CORESEC
+# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+# pylint seems to be confused about this one!
+from collections.abc import MutableMapping  # pylint: disable=no-name-in-module
+
+import dns.name
+
+
+class NameDict(MutableMapping):
+    """A dictionary whose keys are dns.name.Name objects.
+
+    In addition to being like a regular Python dictionary, this
+    dictionary can also get the deepest match for a given key.
+    """
+
+    __slots__ = ["max_depth", "max_depth_items", "__store"]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+        self.__store = dict()
+        #: the maximum depth of the keys that have ever been added
+        self.max_depth = 0
+        #: the number of items of maximum depth
+        self.max_depth_items = 0
+        self.update(dict(*args, **kwargs))
+
+    def __update_max_depth(self, key):
+        # Track the deepest key length and how many keys sit at that
+        # depth, so deletion knows when a rescan is required.
+        if len(key) == self.max_depth:
+            self.max_depth_items = self.max_depth_items + 1
+        elif len(key) > self.max_depth:
+            self.max_depth = len(key)
+            self.max_depth_items = 1
+
+    def __getitem__(self, key):
+        return self.__store[key]
+
+    def __setitem__(self, key, value):
+        if not isinstance(key, dns.name.Name):
+            raise ValueError("NameDict key must be a name")
+        self.__store[key] = value
+        self.__update_max_depth(key)
+
+    def __delitem__(self, key):
+        self.__store.pop(key)
+        if len(key) == self.max_depth:
+            self.max_depth_items = self.max_depth_items - 1
+            if self.max_depth_items == 0:
+                # The last maximum-depth key is gone; rescan remaining
+                # keys to find the new maximum depth.
+                self.max_depth = 0
+                for k in self.__store:
+                    self.__update_max_depth(k)
+
+    def __iter__(self):
+        return iter(self.__store)
+
+    def __len__(self):
+        return len(self.__store)
+
+    def has_key(self, key):
+        return key in self.__store
+
+    def get_deepest_match(self, name):
+        """Find the deepest match to *name* in the dictionary.
+
+        The deepest match is the longest name in the dictionary which is
+        a superdomain of *name*.  Note that *superdomain* includes matching
+        *name* itself.
+
+        *name*, a ``dns.name.Name``, the name to find.
+
+        Returns a ``(key, value)`` where *key* is the deepest
+        ``dns.name.Name``, and *value* is the value associated with *key*.
+        """
+
+        # Try successively shorter suffixes of *name*, deepest first;
+        # fall back to the entry for the empty (root) name.
+        depth = len(name)
+        if depth > self.max_depth:
+            depth = self.max_depth
+        for i in range(-depth, 0):
+            n = dns.name.Name(name[i:])
+            if n in self:
+                return (n, self[n])
+        v = self[dns.name.empty]
+        return (dns.name.empty, v)
diff --git a/vllm/lib/python3.10/site-packages/dns/opcode.py b/vllm/lib/python3.10/site-packages/dns/opcode.py
new file mode 100644
index 0000000000000000000000000000000000000000..78b43d2cbd1404b57f683b2bfad7f726e99caffd
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/dns/opcode.py
@@ -0,0 +1,117 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+import dns.enum
+import dns.exception
+
+
+class Opcode(dns.enum.IntEnum):
+    #: Query
+    QUERY = 0
+    #: Inverse Query (historical)
+    IQUERY = 1
+    #: Server Status (unspecified and unimplemented anywhere)
+    STATUS = 2
+    #: Notify
+    NOTIFY = 4
+    #: Dynamic Update
+    UPDATE = 5
+
+    @classmethod
+    def _maximum(cls):
+        return 15
+
+    @classmethod
+    def _unknown_exception_class(cls):
+        return UnknownOpcode
+
+
+class UnknownOpcode(dns.exception.DNSException):
+    """An DNS opcode is unknown."""
+
+
+def from_text(text: str) -> Opcode:
+    """Convert text into an opcode.
+
+    *text*, a ``str``, the textual opcode
+
+    Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+    Returns an ``int``.
+    """
+
+    return Opcode.from_text(text)
+
+
+def from_flags(flags: int) -> Opcode:
+    """Extract an opcode from DNS message flags.
+
+    *flags*, an ``int``, the DNS flags.
+
+    Returns an ``int``.
+    """
+
+    # The opcode occupies bits 11-14 of the flags word (mask 0x7800).
+    return Opcode((flags & 0x7800) >> 11)
+
+
+def to_flags(value: Opcode) -> int:
+    """Convert an opcode to a value suitable for ORing into DNS message
+    flags.
+
+    *value*, an ``int``, the DNS opcode value.
+
+    Returns an ``int``.
+    """
+
+    # Inverse of from_flags(): shift the opcode into bits 11-14.
+    return (value << 11) & 0x7800
+
+
+def to_text(value: Opcode) -> str:
+    """Convert an opcode to text.
+
+    *value*, an ``int`` the opcode value,
+
+    Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+    Returns a ``str``.
+    """
+
+    return Opcode.to_text(value)
+
+
+def is_update(flags: int) -> bool:
+    """Is the opcode in flags UPDATE?
+
+    *flags*, an ``int``, the DNS message flags.
+
+    Returns a ``bool``.
+    """
+
+    return from_flags(flags) == Opcode.UPDATE
+
+
+### BEGIN generated Opcode constants
+
+QUERY = Opcode.QUERY
+IQUERY = Opcode.IQUERY
+STATUS = Opcode.STATUS
+NOTIFY = Opcode.NOTIFY
+UPDATE = Opcode.UPDATE
+
+### END generated Opcode constants
diff --git a/vllm/lib/python3.10/site-packages/dns/query.py b/vllm/lib/python3.10/site-packages/dns/query.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d8a977abdd2351addfad50f379173f02fe7b80e
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/dns/query.py
@@ -0,0 +1,1665 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Talk to a DNS server.""" + +import base64 +import contextlib +import enum +import errno +import os +import os.path +import random +import selectors +import socket +import struct +import time +import urllib.parse +from typing import Any, Dict, Optional, Tuple, Union, cast + +import dns._features +import dns.exception +import dns.inet +import dns.message +import dns.name +import dns.quic +import dns.rcode +import dns.rdataclass +import dns.rdatatype +import dns.serial +import dns.transaction +import dns.tsig +import dns.xfr + + +def _remaining(expiration): + if expiration is None: + return None + timeout = expiration - time.time() + if timeout <= 0.0: + raise dns.exception.Timeout + return timeout + + +def _expiration_for_this_attempt(timeout, expiration): + if expiration is None: + return None + return min(time.time() + timeout, expiration) + + +_have_httpx = dns._features.have("doh") +if _have_httpx: + import httpcore._backends.sync + import httpx + + _CoreNetworkBackend = httpcore.NetworkBackend + _CoreSyncStream = httpcore._backends.sync.SyncStream + + class _NetworkBackend(_CoreNetworkBackend): + def __init__(self, resolver, local_port, bootstrap_address, family): + super().__init__() + self._local_port = local_port + self._resolver = resolver + self._bootstrap_address = bootstrap_address + self._family = family + + def connect_tcp( + self, host, port, timeout, local_address, socket_options=None + ): # pylint: disable=signature-differs + addresses = [] + _, 
expiration = _compute_times(timeout) + if dns.inet.is_address(host): + addresses.append(host) + elif self._bootstrap_address is not None: + addresses.append(self._bootstrap_address) + else: + timeout = _remaining(expiration) + family = self._family + if local_address: + family = dns.inet.af_for_address(local_address) + answers = self._resolver.resolve_name( + host, family=family, lifetime=timeout + ) + addresses = answers.addresses() + for address in addresses: + af = dns.inet.af_for_address(address) + if local_address is not None or self._local_port != 0: + source = dns.inet.low_level_address_tuple( + (local_address, self._local_port), af + ) + else: + source = None + sock = _make_socket(af, socket.SOCK_STREAM, source) + attempt_expiration = _expiration_for_this_attempt(2.0, expiration) + try: + _connect( + sock, + dns.inet.low_level_address_tuple((address, port), af), + attempt_expiration, + ) + return _CoreSyncStream(sock) + except Exception: + pass + raise httpcore.ConnectError + + def connect_unix_socket( + self, path, timeout, socket_options=None + ): # pylint: disable=signature-differs + raise NotImplementedError + + class _HTTPTransport(httpx.HTTPTransport): + def __init__( + self, + *args, + local_port=0, + bootstrap_address=None, + resolver=None, + family=socket.AF_UNSPEC, + **kwargs, + ): + if resolver is None and bootstrap_address is None: + # pylint: disable=import-outside-toplevel,redefined-outer-name + import dns.resolver + + resolver = dns.resolver.Resolver() + super().__init__(*args, **kwargs) + self._pool._network_backend = _NetworkBackend( + resolver, local_port, bootstrap_address, family + ) + +else: + + class _HTTPTransport: # type: ignore + def connect_tcp(self, host, port, timeout, local_address): + raise NotImplementedError + + +have_doh = _have_httpx + +try: + import ssl +except ImportError: # pragma: no cover + + class ssl: # type: ignore + CERT_NONE = 0 + + class WantReadException(Exception): + pass + + class 
WantWriteException(Exception): + pass + + class SSLContext: + pass + + class SSLSocket: + pass + + @classmethod + def create_default_context(cls, *args, **kwargs): + raise Exception("no ssl support") # pylint: disable=broad-exception-raised + + +# Function used to create a socket. Can be overridden if needed in special +# situations. +socket_factory = socket.socket + + +class UnexpectedSource(dns.exception.DNSException): + """A DNS query response came from an unexpected address or port.""" + + +class BadResponse(dns.exception.FormError): + """A DNS query response does not respond to the question asked.""" + + +class NoDOH(dns.exception.DNSException): + """DNS over HTTPS (DOH) was requested but the httpx module is not + available.""" + + +class NoDOQ(dns.exception.DNSException): + """DNS over QUIC (DOQ) was requested but the aioquic module is not + available.""" + + +# for backwards compatibility +TransferError = dns.xfr.TransferError + + +def _compute_times(timeout): + now = time.time() + if timeout is None: + return (now, None) + else: + return (now, now + timeout) + + +def _wait_for(fd, readable, writable, _, expiration): + # Use the selected selector class to wait for any of the specified + # events. An "expiration" absolute time is converted into a relative + # timeout. + # + # The unused parameter is 'error', which is always set when + # selecting for read or write, and we have no error-only selects. 
+ + if readable and isinstance(fd, ssl.SSLSocket) and fd.pending() > 0: + return True + sel = selectors.DefaultSelector() + events = 0 + if readable: + events |= selectors.EVENT_READ + if writable: + events |= selectors.EVENT_WRITE + if events: + sel.register(fd, events) + if expiration is None: + timeout = None + else: + timeout = expiration - time.time() + if timeout <= 0.0: + raise dns.exception.Timeout + if not sel.select(timeout): + raise dns.exception.Timeout + + +def _wait_for_readable(s, expiration): + _wait_for(s, True, False, True, expiration) + + +def _wait_for_writable(s, expiration): + _wait_for(s, False, True, True, expiration) + + +def _addresses_equal(af, a1, a2): + # Convert the first value of the tuple, which is a textual format + # address into binary form, so that we are not confused by different + # textual representations of the same address + try: + n1 = dns.inet.inet_pton(af, a1[0]) + n2 = dns.inet.inet_pton(af, a2[0]) + except dns.exception.SyntaxError: + return False + return n1 == n2 and a1[1:] == a2[1:] + + +def _matches_destination(af, from_address, destination, ignore_unexpected): + # Check that from_address is appropriate for a response to a query + # sent to destination. + if not destination: + return True + if _addresses_equal(af, from_address, destination) or ( + dns.inet.is_multicast(destination[0]) and from_address[1:] == destination[1:] + ): + return True + elif ignore_unexpected: + return False + raise UnexpectedSource( + f"got a response from {from_address} instead of " f"{destination}" + ) + + +def _destination_and_source( + where, port, source, source_port, where_must_be_address=True +): + # Apply defaults and compute destination and source tuples + # suitable for use in connect(), sendto(), or bind(). 
+ af = None + destination = None + try: + af = dns.inet.af_for_address(where) + destination = where + except Exception: + if where_must_be_address: + raise + # URLs are ok so eat the exception + if source: + saf = dns.inet.af_for_address(source) + if af: + # We know the destination af, so source had better agree! + if saf != af: + raise ValueError( + "different address families for source and destination" + ) + else: + # We didn't know the destination af, but we know the source, + # so that's our af. + af = saf + if source_port and not source: + # Caller has specified a source_port but not an address, so we + # need to return a source, and we need to use the appropriate + # wildcard address as the address. + try: + source = dns.inet.any_for_af(af) + except Exception: + # we catch this and raise ValueError for backwards compatibility + raise ValueError("source_port specified but address family is unknown") + # Convert high-level (address, port) tuples into low-level address + # tuples. + if destination: + destination = dns.inet.low_level_address_tuple((destination, port), af) + if source: + source = dns.inet.low_level_address_tuple((source, source_port), af) + return (af, destination, source) + + +def _make_socket(af, type, source, ssl_context=None, server_hostname=None): + s = socket_factory(af, type) + try: + s.setblocking(False) + if source is not None: + s.bind(source) + if ssl_context: + # LGTM gets a false positive here, as our default context is OK + return ssl_context.wrap_socket( + s, + do_handshake_on_connect=False, # lgtm[py/insecure-protocol] + server_hostname=server_hostname, + ) + else: + return s + except Exception: + s.close() + raise + + +def _maybe_get_resolver( + resolver: Optional["dns.resolver.Resolver"], +) -> "dns.resolver.Resolver": + # We need a separate method for this to avoid overriding the global + # variable "dns" with the as-yet undefined local variable "dns" + # in https(). 
+ if resolver is None: + # pylint: disable=import-outside-toplevel,redefined-outer-name + import dns.resolver + + resolver = dns.resolver.Resolver() + return resolver + + +class HTTPVersion(enum.IntEnum): + """Which version of HTTP should be used? + + DEFAULT will select the first version from the list [2, 1.1, 3] that + is available. + """ + + DEFAULT = 0 + HTTP_1 = 1 + H1 = 1 + HTTP_2 = 2 + H2 = 2 + HTTP_3 = 3 + H3 = 3 + + +def https( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 443, + source: Optional[str] = None, + source_port: int = 0, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + session: Optional[Any] = None, + path: str = "/dns-query", + post: bool = True, + bootstrap_address: Optional[str] = None, + verify: Union[bool, str] = True, + resolver: Optional["dns.resolver.Resolver"] = None, + family: int = socket.AF_UNSPEC, + http_version: HTTPVersion = HTTPVersion.DEFAULT, +) -> dns.message.Message: + """Return the response obtained after sending a query via DNS-over-HTTPS. + + *q*, a ``dns.message.Message``, the query to send. + + *where*, a ``str``, the nameserver IP address or the full URL. If an IP address is + given, the URL will be constructed using the following schema: + https://:/. + + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query + times out. If ``None``, the default, wait forever. + + *port*, a ``int``, the port to send the query to. The default is 443. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source + address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. The default is + 0. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the + received message. + + *session*, an ``httpx.Client``. If provided, the client session to use to send the + queries. 
+ + *path*, a ``str``. If *where* is an IP address, then *path* will be used to + construct the URL to send the DNS query to. + + *post*, a ``bool``. If ``True``, the default, POST method will be used. + + *bootstrap_address*, a ``str``, the IP address to use to bypass resolution. + + *verify*, a ``bool`` or ``str``. If a ``True``, then TLS certificate verification + of the server is done using the default CA bundle; if ``False``, then no + verification is done; if a `str` then it specifies the path to a certificate file or + directory which will be used for verification. + + *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use for + resolution of hostnames in URLs. If not specified, a new resolver with a default + configuration will be used; note this is *not* the default resolver as that resolver + might have been configured to use DoH causing a chicken-and-egg problem. This + parameter only has an effect if the HTTP library is httpx. + + *family*, an ``int``, the address family. If socket.AF_UNSPEC (the default), both A + and AAAA records will be retrieved. + + *http_version*, a ``dns.query.HTTPVersion``, indicating which HTTP version to use. + + Returns a ``dns.message.Message``. 
+ """ + + (af, _, the_source) = _destination_and_source( + where, port, source, source_port, False + ) + if af is not None and dns.inet.is_address(where): + if af == socket.AF_INET: + url = f"https://{where}:{port}{path}" + elif af == socket.AF_INET6: + url = f"https://[{where}]:{port}{path}" + else: + url = where + + extensions = {} + if bootstrap_address is None: + # pylint: disable=possibly-used-before-assignment + parsed = urllib.parse.urlparse(url) + if parsed.hostname is None: + raise ValueError("no hostname in URL") + if dns.inet.is_address(parsed.hostname): + bootstrap_address = parsed.hostname + extensions["sni_hostname"] = parsed.hostname + if parsed.port is not None: + port = parsed.port + + if http_version == HTTPVersion.H3 or ( + http_version == HTTPVersion.DEFAULT and not have_doh + ): + if bootstrap_address is None: + resolver = _maybe_get_resolver(resolver) + assert parsed.hostname is not None # for mypy + answers = resolver.resolve_name(parsed.hostname, family) + bootstrap_address = random.choice(list(answers.addresses())) + return _http3( + q, + bootstrap_address, + url, + timeout, + port, + source, + source_port, + one_rr_per_rrset, + ignore_trailing, + verify=verify, + post=post, + ) + + if not have_doh: + raise NoDOH # pragma: no cover + if session and not isinstance(session, httpx.Client): + raise ValueError("session parameter must be an httpx.Client") + + wire = q.to_wire() + headers = {"accept": "application/dns-message"} + + h1 = http_version in (HTTPVersion.H1, HTTPVersion.DEFAULT) + h2 = http_version in (HTTPVersion.H2, HTTPVersion.DEFAULT) + + # set source port and source address + + if the_source is None: + local_address = None + local_port = 0 + else: + local_address = the_source[0] + local_port = the_source[1] + + if session: + cm: contextlib.AbstractContextManager = contextlib.nullcontext(session) + else: + transport = _HTTPTransport( + local_address=local_address, + http1=h1, + http2=h2, + verify=verify, + local_port=local_port, + 
bootstrap_address=bootstrap_address, + resolver=resolver, + family=family, + ) + + cm = httpx.Client(http1=h1, http2=h2, verify=verify, transport=transport) + with cm as session: + # see https://tools.ietf.org/html/rfc8484#section-4.1.1 for DoH + # GET and POST examples + if post: + headers.update( + { + "content-type": "application/dns-message", + "content-length": str(len(wire)), + } + ) + response = session.post( + url, + headers=headers, + content=wire, + timeout=timeout, + extensions=extensions, + ) + else: + wire = base64.urlsafe_b64encode(wire).rstrip(b"=") + twire = wire.decode() # httpx does a repr() if we give it bytes + response = session.get( + url, + headers=headers, + timeout=timeout, + params={"dns": twire}, + extensions=extensions, + ) + + # see https://tools.ietf.org/html/rfc8484#section-4.2.1 for info about DoH + # status codes + if response.status_code < 200 or response.status_code > 299: + raise ValueError( + f"{where} responded with status code {response.status_code}" + f"\nResponse body: {response.content}" + ) + r = dns.message.from_wire( + response.content, + keyring=q.keyring, + request_mac=q.request_mac, + one_rr_per_rrset=one_rr_per_rrset, + ignore_trailing=ignore_trailing, + ) + r.time = response.elapsed.total_seconds() + if not q.is_response(r): + raise BadResponse + return r + + +def _find_header(headers: dns.quic.Headers, name: bytes) -> bytes: + if headers is None: + raise KeyError + for header, value in headers: + if header == name: + return value + raise KeyError + + +def _check_status(headers: dns.quic.Headers, peer: str, wire: bytes) -> None: + value = _find_header(headers, b":status") + if value is None: + raise SyntaxError("no :status header in response") + status = int(value) + if status < 0: + raise SyntaxError("status is negative") + if status < 200 or status > 299: + error = "" + if len(wire) > 0: + try: + error = ": " + wire.decode() + except Exception: + pass + raise ValueError(f"{peer} responded with status code 
{status}{error}") + + +def _http3( + q: dns.message.Message, + where: str, + url: str, + timeout: Optional[float] = None, + port: int = 853, + source: Optional[str] = None, + source_port: int = 0, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + verify: Union[bool, str] = True, + hostname: Optional[str] = None, + post: bool = True, +) -> dns.message.Message: + if not dns.quic.have_quic: + raise NoDOH("DNS-over-HTTP3 is not available.") # pragma: no cover + + url_parts = urllib.parse.urlparse(url) + hostname = url_parts.hostname + if url_parts.port is not None: + port = url_parts.port + + q.id = 0 + wire = q.to_wire() + manager = dns.quic.SyncQuicManager( + verify_mode=verify, server_name=hostname, h3=True + ) + + with manager: + connection = manager.connect(where, port, source, source_port) + (start, expiration) = _compute_times(timeout) + with connection.make_stream(timeout) as stream: + stream.send_h3(url, wire, post) + wire = stream.receive(_remaining(expiration)) + _check_status(stream.headers(), where, wire) + finish = time.time() + r = dns.message.from_wire( + wire, + keyring=q.keyring, + request_mac=q.request_mac, + one_rr_per_rrset=one_rr_per_rrset, + ignore_trailing=ignore_trailing, + ) + r.time = max(finish - start, 0.0) + if not q.is_response(r): + raise BadResponse + return r + + +def _udp_recv(sock, max_size, expiration): + """Reads a datagram from the socket. + A Timeout exception will be raised if the operation is not completed + by the expiration time. + """ + while True: + try: + return sock.recvfrom(max_size) + except BlockingIOError: + _wait_for_readable(sock, expiration) + + +def _udp_send(sock, data, destination, expiration): + """Sends the specified datagram to destination over the socket. + A Timeout exception will be raised if the operation is not completed + by the expiration time. 
+ """ + while True: + try: + if destination: + return sock.sendto(data, destination) + else: + return sock.send(data) + except BlockingIOError: # pragma: no cover + _wait_for_writable(sock, expiration) + + +def send_udp( + sock: Any, + what: Union[dns.message.Message, bytes], + destination: Any, + expiration: Optional[float] = None, +) -> Tuple[int, float]: + """Send a DNS message to the specified UDP socket. + + *sock*, a ``socket``. + + *what*, a ``bytes`` or ``dns.message.Message``, the message to send. + + *destination*, a destination tuple appropriate for the address family + of the socket, specifying where to send the query. + + *expiration*, a ``float`` or ``None``, the absolute time at which + a timeout exception should be raised. If ``None``, no timeout will + occur. + + Returns an ``(int, float)`` tuple of bytes sent and the sent time. + """ + + if isinstance(what, dns.message.Message): + what = what.to_wire() + sent_time = time.time() + n = _udp_send(sock, what, destination, expiration) + return (n, sent_time) + + +def receive_udp( + sock: Any, + destination: Optional[Any] = None, + expiration: Optional[float] = None, + ignore_unexpected: bool = False, + one_rr_per_rrset: bool = False, + keyring: Optional[Dict[dns.name.Name, dns.tsig.Key]] = None, + request_mac: Optional[bytes] = b"", + ignore_trailing: bool = False, + raise_on_truncation: bool = False, + ignore_errors: bool = False, + query: Optional[dns.message.Message] = None, +) -> Any: + """Read a DNS message from a UDP socket. + + *sock*, a ``socket``. + + *destination*, a destination tuple appropriate for the address family + of the socket, specifying where the message is expected to arrive from. + When receiving a response, this would be where the associated query was + sent. + + *expiration*, a ``float`` or ``None``, the absolute time at which + a timeout exception should be raised. If ``None``, no timeout will + occur. + + *ignore_unexpected*, a ``bool``. 
If ``True``, ignore responses from + unexpected sources. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own + RRset. + + *keyring*, a ``dict``, the keyring to use for TSIG. + + *request_mac*, a ``bytes`` or ``None``, the MAC of the request (for TSIG). + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing + junk at end of the received message. + + *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if + the TC bit is set. + + Raises if the message is malformed, if network errors occur, of if + there is a timeout. + + If *destination* is not ``None``, returns a ``(dns.message.Message, float)`` + tuple of the received message and the received time. + + If *destination* is ``None``, returns a + ``(dns.message.Message, float, tuple)`` + tuple of the received message, the received time, and the address where + the message arrived from. + + *ignore_errors*, a ``bool``. If various format errors or response + mismatches occur, ignore them and keep listening for a valid response. + The default is ``False``. + + *query*, a ``dns.message.Message`` or ``None``. If not ``None`` and + *ignore_errors* is ``True``, check that the received message is a response + to this query, and if not keep listening for a valid response. + """ + + wire = b"" + while True: + (wire, from_address) = _udp_recv(sock, 65535, expiration) + if not _matches_destination( + sock.family, from_address, destination, ignore_unexpected + ): + continue + received_time = time.time() + try: + r = dns.message.from_wire( + wire, + keyring=keyring, + request_mac=request_mac, + one_rr_per_rrset=one_rr_per_rrset, + ignore_trailing=ignore_trailing, + raise_on_truncation=raise_on_truncation, + ) + except dns.message.Truncated as e: + # If we got Truncated and not FORMERR, we at least got the header with TC + # set, and very likely the question section, so we'll re-raise if the + # message seems to be a response as we need to know when truncation happens. 
+ # We need to check that it seems to be a response as we don't want a random + # injected message with TC set to cause us to bail out. + if ( + ignore_errors + and query is not None + and not query.is_response(e.message()) + ): + continue + else: + raise + except Exception: + if ignore_errors: + continue + else: + raise + if ignore_errors and query is not None and not query.is_response(r): + continue + if destination: + return (r, received_time) + else: + return (r, received_time, from_address) + + +def udp( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 53, + source: Optional[str] = None, + source_port: int = 0, + ignore_unexpected: bool = False, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + raise_on_truncation: bool = False, + sock: Optional[Any] = None, + ignore_errors: bool = False, +) -> dns.message.Message: + """Return the response obtained after sending a query via UDP. + + *q*, a ``dns.message.Message``, the query to send + + *where*, a ``str`` containing an IPv4 or IPv6 address, where + to send the message. + + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the + query times out. If ``None``, the default, wait forever. + + *port*, an ``int``, the port send the message to. The default is 53. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying + the source address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. + The default is 0. + + *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from + unexpected sources. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own + RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing + junk at end of the received message. + + *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if + the TC bit is set. + + *sock*, a ``socket.socket``, or ``None``, the socket to use for the + query. 
If ``None``, the default, a socket is created. Note that + if a socket is provided, it must be a nonblocking datagram socket, + and the *source* and *source_port* are ignored. + + *ignore_errors*, a ``bool``. If various format errors or response + mismatches occur, ignore them and keep listening for a valid response. + The default is ``False``. + + Returns a ``dns.message.Message``. + """ + + wire = q.to_wire() + (af, destination, source) = _destination_and_source( + where, port, source, source_port + ) + (begin_time, expiration) = _compute_times(timeout) + if sock: + cm: contextlib.AbstractContextManager = contextlib.nullcontext(sock) + else: + cm = _make_socket(af, socket.SOCK_DGRAM, source) + with cm as s: + send_udp(s, wire, destination, expiration) + (r, received_time) = receive_udp( + s, + destination, + expiration, + ignore_unexpected, + one_rr_per_rrset, + q.keyring, + q.mac, + ignore_trailing, + raise_on_truncation, + ignore_errors, + q, + ) + r.time = received_time - begin_time + # We don't need to check q.is_response() if we are in ignore_errors mode + # as receive_udp() will have checked it. + if not (ignore_errors or q.is_response(r)): + raise BadResponse + return r + assert ( + False # help mypy figure out we can't get here lgtm[py/unreachable-statement] + ) + + +def udp_with_fallback( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 53, + source: Optional[str] = None, + source_port: int = 0, + ignore_unexpected: bool = False, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + udp_sock: Optional[Any] = None, + tcp_sock: Optional[Any] = None, + ignore_errors: bool = False, +) -> Tuple[dns.message.Message, bool]: + """Return the response to the query, trying UDP first and falling back + to TCP if UDP results in a truncated response. + + *q*, a ``dns.message.Message``, the query to send + + *where*, a ``str`` containing an IPv4 or IPv6 address, where to send the message. 
+ + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query + times out. If ``None``, the default, wait forever. + + *port*, an ``int``, the port send the message to. The default is 53. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source + address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. The default is + 0. + + *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from unexpected + sources. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the + received message. + + *udp_sock*, a ``socket.socket``, or ``None``, the socket to use for the UDP query. + If ``None``, the default, a socket is created. Note that if a socket is provided, + it must be a nonblocking datagram socket, and the *source* and *source_port* are + ignored for the UDP query. + + *tcp_sock*, a ``socket.socket``, or ``None``, the connected socket to use for the + TCP query. If ``None``, the default, a socket is created. Note that if a socket is + provided, it must be a nonblocking connected stream socket, and *where*, *source* + and *source_port* are ignored for the TCP query. + + *ignore_errors*, a ``bool``. If various format errors or response mismatches occur + while listening for UDP, ignore them and keep listening for a valid response. The + default is ``False``. + + Returns a (``dns.message.Message``, tcp) tuple where tcp is ``True`` if and only if + TCP was used. 
+ """ + try: + response = udp( + q, + where, + timeout, + port, + source, + source_port, + ignore_unexpected, + one_rr_per_rrset, + ignore_trailing, + True, + udp_sock, + ignore_errors, + ) + return (response, False) + except dns.message.Truncated: + response = tcp( + q, + where, + timeout, + port, + source, + source_port, + one_rr_per_rrset, + ignore_trailing, + tcp_sock, + ) + return (response, True) + + +def _net_read(sock, count, expiration): + """Read the specified number of bytes from sock. Keep trying until we + either get the desired amount, or we hit EOF. + A Timeout exception will be raised if the operation is not completed + by the expiration time. + """ + s = b"" + while count > 0: + try: + n = sock.recv(count) + if n == b"": + raise EOFError("EOF") + count -= len(n) + s += n + except (BlockingIOError, ssl.SSLWantReadError): + _wait_for_readable(sock, expiration) + except ssl.SSLWantWriteError: # pragma: no cover + _wait_for_writable(sock, expiration) + return s + + +def _net_write(sock, data, expiration): + """Write the specified data to the socket. + A Timeout exception will be raised if the operation is not completed + by the expiration time. + """ + current = 0 + l = len(data) + while current < l: + try: + current += sock.send(data[current:]) + except (BlockingIOError, ssl.SSLWantWriteError): + _wait_for_writable(sock, expiration) + except ssl.SSLWantReadError: # pragma: no cover + _wait_for_readable(sock, expiration) + + +def send_tcp( + sock: Any, + what: Union[dns.message.Message, bytes], + expiration: Optional[float] = None, +) -> Tuple[int, float]: + """Send a DNS message to the specified TCP socket. + + *sock*, a ``socket``. + + *what*, a ``bytes`` or ``dns.message.Message``, the message to send. + + *expiration*, a ``float`` or ``None``, the absolute time at which + a timeout exception should be raised. If ``None``, no timeout will + occur. + + Returns an ``(int, float)`` tuple of bytes sent and the sent time. 
+ """ + + if isinstance(what, dns.message.Message): + tcpmsg = what.to_wire(prepend_length=True) + else: + # copying the wire into tcpmsg is inefficient, but lets us + # avoid writev() or doing a short write that would get pushed + # onto the net + tcpmsg = len(what).to_bytes(2, "big") + what + sent_time = time.time() + _net_write(sock, tcpmsg, expiration) + return (len(tcpmsg), sent_time) + + +def receive_tcp( + sock: Any, + expiration: Optional[float] = None, + one_rr_per_rrset: bool = False, + keyring: Optional[Dict[dns.name.Name, dns.tsig.Key]] = None, + request_mac: Optional[bytes] = b"", + ignore_trailing: bool = False, +) -> Tuple[dns.message.Message, float]: + """Read a DNS message from a TCP socket. + + *sock*, a ``socket``. + + *expiration*, a ``float`` or ``None``, the absolute time at which + a timeout exception should be raised. If ``None``, no timeout will + occur. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own + RRset. + + *keyring*, a ``dict``, the keyring to use for TSIG. + + *request_mac*, a ``bytes`` or ``None``, the MAC of the request (for TSIG). + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing + junk at end of the received message. + + Raises if the message is malformed, if network errors occur, of if + there is a timeout. + + Returns a ``(dns.message.Message, float)`` tuple of the received message + and the received time. 
+ """ + + ldata = _net_read(sock, 2, expiration) + (l,) = struct.unpack("!H", ldata) + wire = _net_read(sock, l, expiration) + received_time = time.time() + r = dns.message.from_wire( + wire, + keyring=keyring, + request_mac=request_mac, + one_rr_per_rrset=one_rr_per_rrset, + ignore_trailing=ignore_trailing, + ) + return (r, received_time) + + +def _connect(s, address, expiration): + err = s.connect_ex(address) + if err == 0: + return + if err in (errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY): + _wait_for_writable(s, expiration) + err = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise OSError(err, os.strerror(err)) + + +def tcp( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 53, + source: Optional[str] = None, + source_port: int = 0, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + sock: Optional[Any] = None, +) -> dns.message.Message: + """Return the response obtained after sending a query via TCP. + + *q*, a ``dns.message.Message``, the query to send + + *where*, a ``str`` containing an IPv4 or IPv6 address, where + to send the message. + + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the + query times out. If ``None``, the default, wait forever. + + *port*, an ``int``, the port send the message to. The default is 53. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying + the source address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. + The default is 0. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own + RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing + junk at end of the received message. + + *sock*, a ``socket.socket``, or ``None``, the connected socket to use for the + query. If ``None``, the default, a socket is created. 
Note that + if a socket is provided, it must be a nonblocking connected stream + socket, and *where*, *port*, *source* and *source_port* are ignored. + + Returns a ``dns.message.Message``. + """ + + wire = q.to_wire() + (begin_time, expiration) = _compute_times(timeout) + if sock: + cm: contextlib.AbstractContextManager = contextlib.nullcontext(sock) + else: + (af, destination, source) = _destination_and_source( + where, port, source, source_port + ) + cm = _make_socket(af, socket.SOCK_STREAM, source) + with cm as s: + if not sock: + # pylint: disable=possibly-used-before-assignment + _connect(s, destination, expiration) + send_tcp(s, wire, expiration) + (r, received_time) = receive_tcp( + s, expiration, one_rr_per_rrset, q.keyring, q.mac, ignore_trailing + ) + r.time = received_time - begin_time + if not q.is_response(r): + raise BadResponse + return r + assert ( + False # help mypy figure out we can't get here lgtm[py/unreachable-statement] + ) + + +def _tls_handshake(s, expiration): + while True: + try: + s.do_handshake() + return + except ssl.SSLWantReadError: + _wait_for_readable(s, expiration) + except ssl.SSLWantWriteError: # pragma: no cover + _wait_for_writable(s, expiration) + + +def _make_dot_ssl_context( + server_hostname: Optional[str], verify: Union[bool, str] +) -> ssl.SSLContext: + cafile: Optional[str] = None + capath: Optional[str] = None + if isinstance(verify, str): + if os.path.isfile(verify): + cafile = verify + elif os.path.isdir(verify): + capath = verify + else: + raise ValueError("invalid verify string") + ssl_context = ssl.create_default_context(cafile=cafile, capath=capath) + ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2 + if server_hostname is None: + ssl_context.check_hostname = False + ssl_context.set_alpn_protocols(["dot"]) + if verify is False: + ssl_context.verify_mode = ssl.CERT_NONE + return ssl_context + + +def tls( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 853, + source: 
Optional[str] = None, + source_port: int = 0, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + sock: Optional[ssl.SSLSocket] = None, + ssl_context: Optional[ssl.SSLContext] = None, + server_hostname: Optional[str] = None, + verify: Union[bool, str] = True, +) -> dns.message.Message: + """Return the response obtained after sending a query via TLS. + + *q*, a ``dns.message.Message``, the query to send + + *where*, a ``str`` containing an IPv4 or IPv6 address, where + to send the message. + + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the + query times out. If ``None``, the default, wait forever. + + *port*, an ``int``, the port send the message to. The default is 853. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying + the source address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. + The default is 0. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own + RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing + junk at end of the received message. + + *sock*, an ``ssl.SSLSocket``, or ``None``, the socket to use for + the query. If ``None``, the default, a socket is created. Note + that if a socket is provided, it must be a nonblocking connected + SSL stream socket, and *where*, *port*, *source*, *source_port*, + and *ssl_context* are ignored. + + *ssl_context*, an ``ssl.SSLContext``, the context to use when establishing + a TLS connection. If ``None``, the default, creates one with the default + configuration. + + *server_hostname*, a ``str`` containing the server's hostname. The + default is ``None``, which means that no hostname is known, and if an + SSL context is created, hostname checking will be disabled. + + *verify*, a ``bool`` or ``str``. 
If a ``True``, then TLS certificate verification + of the server is done using the default CA bundle; if ``False``, then no + verification is done; if a `str` then it specifies the path to a certificate file or + directory which will be used for verification. + + Returns a ``dns.message.Message``. + + """ + + if sock: + # + # If a socket was provided, there's no special TLS handling needed. + # + return tcp( + q, + where, + timeout, + port, + source, + source_port, + one_rr_per_rrset, + ignore_trailing, + sock, + ) + + wire = q.to_wire() + (begin_time, expiration) = _compute_times(timeout) + (af, destination, source) = _destination_and_source( + where, port, source, source_port + ) + if ssl_context is None and not sock: + ssl_context = _make_dot_ssl_context(server_hostname, verify) + + with _make_socket( + af, + socket.SOCK_STREAM, + source, + ssl_context=ssl_context, + server_hostname=server_hostname, + ) as s: + _connect(s, destination, expiration) + _tls_handshake(s, expiration) + send_tcp(s, wire, expiration) + (r, received_time) = receive_tcp( + s, expiration, one_rr_per_rrset, q.keyring, q.mac, ignore_trailing + ) + r.time = received_time - begin_time + if not q.is_response(r): + raise BadResponse + return r + assert ( + False # help mypy figure out we can't get here lgtm[py/unreachable-statement] + ) + + +def quic( + q: dns.message.Message, + where: str, + timeout: Optional[float] = None, + port: int = 853, + source: Optional[str] = None, + source_port: int = 0, + one_rr_per_rrset: bool = False, + ignore_trailing: bool = False, + connection: Optional[dns.quic.SyncQuicConnection] = None, + verify: Union[bool, str] = True, + hostname: Optional[str] = None, + server_hostname: Optional[str] = None, +) -> dns.message.Message: + """Return the response obtained after sending a query via DNS-over-QUIC. + + *q*, a ``dns.message.Message``, the query to send. + + *where*, a ``str``, the nameserver IP address. 
+ + *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query + times out. If ``None``, the default, wait forever. + + *port*, a ``int``, the port to send the query to. The default is 853. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source + address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. The default is + 0. + + *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset. + + *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the + received message. + + *connection*, a ``dns.quic.SyncQuicConnection``. If provided, the connection to use + to send the query. + + *verify*, a ``bool`` or ``str``. If a ``True``, then TLS certificate verification + of the server is done using the default CA bundle; if ``False``, then no + verification is done; if a `str` then it specifies the path to a certificate file or + directory which will be used for verification. + + *hostname*, a ``str`` containing the server's hostname or ``None``. The default is + ``None``, which means that no hostname is known, and if an SSL context is created, + hostname checking will be disabled. This value is ignored if *url* is not + ``None``. + + *server_hostname*, a ``str`` or ``None``. This item is for backwards compatibility + only, and has the same meaning as *hostname*. + + Returns a ``dns.message.Message``. 
+ """ + + if not dns.quic.have_quic: + raise NoDOQ("DNS-over-QUIC is not available.") # pragma: no cover + + if server_hostname is not None and hostname is None: + hostname = server_hostname + + q.id = 0 + wire = q.to_wire() + the_connection: dns.quic.SyncQuicConnection + the_manager: dns.quic.SyncQuicManager + if connection: + manager: contextlib.AbstractContextManager = contextlib.nullcontext(None) + the_connection = connection + else: + manager = dns.quic.SyncQuicManager(verify_mode=verify, server_name=hostname) + the_manager = manager # for type checking happiness + + with manager: + if not connection: + the_connection = the_manager.connect(where, port, source, source_port) + (start, expiration) = _compute_times(timeout) + with the_connection.make_stream(timeout) as stream: + stream.send(wire, True) + wire = stream.receive(_remaining(expiration)) + finish = time.time() + r = dns.message.from_wire( + wire, + keyring=q.keyring, + request_mac=q.request_mac, + one_rr_per_rrset=one_rr_per_rrset, + ignore_trailing=ignore_trailing, + ) + r.time = max(finish - start, 0.0) + if not q.is_response(r): + raise BadResponse + return r + + +class UDPMode(enum.IntEnum): + """How should UDP be used in an IXFR from :py:func:`inbound_xfr()`? 
+ + NEVER means "never use UDP; always use TCP" + TRY_FIRST means "try to use UDP but fall back to TCP if needed" + ONLY means "raise ``dns.xfr.UseTCP`` if trying UDP does not succeed" + """ + + NEVER = 0 + TRY_FIRST = 1 + ONLY = 2 + + +def _inbound_xfr( + txn_manager: dns.transaction.TransactionManager, + s: socket.socket, + query: dns.message.Message, + serial: Optional[int], + timeout: Optional[float], + expiration: float, +) -> Any: + """Given a socket, does the zone transfer.""" + rdtype = query.question[0].rdtype + is_ixfr = rdtype == dns.rdatatype.IXFR + origin = txn_manager.from_wire_origin() + wire = query.to_wire() + is_udp = s.type == socket.SOCK_DGRAM + if is_udp: + _udp_send(s, wire, None, expiration) + else: + tcpmsg = struct.pack("!H", len(wire)) + wire + _net_write(s, tcpmsg, expiration) + with dns.xfr.Inbound(txn_manager, rdtype, serial, is_udp) as inbound: + done = False + tsig_ctx = None + while not done: + (_, mexpiration) = _compute_times(timeout) + if mexpiration is None or ( + expiration is not None and mexpiration > expiration + ): + mexpiration = expiration + if is_udp: + (rwire, _) = _udp_recv(s, 65535, mexpiration) + else: + ldata = _net_read(s, 2, mexpiration) + (l,) = struct.unpack("!H", ldata) + rwire = _net_read(s, l, mexpiration) + r = dns.message.from_wire( + rwire, + keyring=query.keyring, + request_mac=query.mac, + xfr=True, + origin=origin, + tsig_ctx=tsig_ctx, + multi=(not is_udp), + one_rr_per_rrset=is_ixfr, + ) + done = inbound.process_message(r) + yield r + tsig_ctx = r.tsig_ctx + if query.keyring and not r.had_tsig: + raise dns.exception.FormError("missing TSIG") + + +def xfr( + where: str, + zone: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.AXFR, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + timeout: Optional[float] = None, + port: int = 53, + keyring: Optional[Dict[dns.name.Name, dns.tsig.Key]] = None, + keyname: Optional[Union[dns.name.Name, str]] = 
None, + relativize: bool = True, + lifetime: Optional[float] = None, + source: Optional[str] = None, + source_port: int = 0, + serial: int = 0, + use_udp: bool = False, + keyalgorithm: Union[dns.name.Name, str] = dns.tsig.default_algorithm, +) -> Any: + """Return a generator for the responses to a zone transfer. + + *where*, a ``str`` containing an IPv4 or IPv6 address, where + to send the message. + + *zone*, a ``dns.name.Name`` or ``str``, the name of the zone to transfer. + + *rdtype*, an ``int`` or ``str``, the type of zone transfer. The + default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be + used to do an incremental transfer instead. + + *rdclass*, an ``int`` or ``str``, the class of the zone transfer. + The default is ``dns.rdataclass.IN``. + + *timeout*, a ``float``, the number of seconds to wait for each + response message. If None, the default, wait forever. + + *port*, an ``int``, the port send the message to. The default is 53. + + *keyring*, a ``dict``, the keyring to use for TSIG. + + *keyname*, a ``dns.name.Name`` or ``str``, the name of the TSIG + key to use. + + *relativize*, a ``bool``. If ``True``, all names in the zone will be + relativized to the zone origin. It is essential that the + relativize setting matches the one specified to + ``dns.zone.from_xfr()`` if using this generator to make a zone. + + *lifetime*, a ``float``, the total number of seconds to spend + doing the transfer. If ``None``, the default, then there is no + limit on the time the transfer may take. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying + the source address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. + The default is 0. + + *serial*, an ``int``, the SOA serial number to use as the base for + an IXFR diff sequence (only meaningful if *rdtype* is + ``dns.rdatatype.IXFR``). + + *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR). 
+ + *keyalgorithm*, a ``dns.name.Name`` or ``str``, the TSIG algorithm to use. + + Raises on errors, and so does the generator. + + Returns a generator of ``dns.message.Message`` objects. + """ + + class DummyTransactionManager(dns.transaction.TransactionManager): + def __init__(self, origin, relativize): + self.info = (origin, relativize, dns.name.empty if relativize else origin) + + def origin_information(self): + return self.info + + def get_class(self) -> dns.rdataclass.RdataClass: + raise NotImplementedError # pragma: no cover + + def reader(self): + raise NotImplementedError # pragma: no cover + + def writer(self, replacement: bool = False) -> dns.transaction.Transaction: + class DummyTransaction: + def nop(self, *args, **kw): + pass + + def __getattr__(self, _): + return self.nop + + return cast(dns.transaction.Transaction, DummyTransaction()) + + if isinstance(zone, str): + zone = dns.name.from_text(zone) + rdtype = dns.rdatatype.RdataType.make(rdtype) + q = dns.message.make_query(zone, rdtype, rdclass) + if rdtype == dns.rdatatype.IXFR: + rrset = q.find_rrset( + q.authority, zone, dns.rdataclass.IN, dns.rdatatype.SOA, create=True + ) + soa = dns.rdata.from_text("IN", "SOA", ". . 
%u 0 0 0 0" % serial) + rrset.add(soa, 0) + if keyring is not None: + q.use_tsig(keyring, keyname, algorithm=keyalgorithm) + (af, destination, source) = _destination_and_source( + where, port, source, source_port + ) + (_, expiration) = _compute_times(lifetime) + tm = DummyTransactionManager(zone, relativize) + if use_udp and rdtype != dns.rdatatype.IXFR: + raise ValueError("cannot do a UDP AXFR") + sock_type = socket.SOCK_DGRAM if use_udp else socket.SOCK_STREAM + with _make_socket(af, sock_type, source) as s: + _connect(s, destination, expiration) + yield from _inbound_xfr(tm, s, q, serial, timeout, expiration) + + +def inbound_xfr( + where: str, + txn_manager: dns.transaction.TransactionManager, + query: Optional[dns.message.Message] = None, + port: int = 53, + timeout: Optional[float] = None, + lifetime: Optional[float] = None, + source: Optional[str] = None, + source_port: int = 0, + udp_mode: UDPMode = UDPMode.NEVER, +) -> None: + """Conduct an inbound transfer and apply it via a transaction from the + txn_manager. + + *where*, a ``str`` containing an IPv4 or IPv6 address, where + to send the message. + + *txn_manager*, a ``dns.transaction.TransactionManager``, the txn_manager + for this transfer (typically a ``dns.zone.Zone``). + + *query*, the query to send. If not supplied, a default query is + constructed using information from the *txn_manager*. + + *port*, an ``int``, the port send the message to. The default is 53. + + *timeout*, a ``float``, the number of seconds to wait for each + response message. If None, the default, wait forever. + + *lifetime*, a ``float``, the total number of seconds to spend + doing the transfer. If ``None``, the default, then there is no + limit on the time the transfer may take. + + *source*, a ``str`` containing an IPv4 or IPv6 address, specifying + the source address. The default is the wildcard address. + + *source_port*, an ``int``, the port from which to send the message. + The default is 0. 
+ + *udp_mode*, a ``dns.query.UDPMode``, determines how UDP is used + for IXFRs. The default is ``dns.UDPMode.NEVER``, i.e. only use + TCP. Other possibilities are ``dns.UDPMode.TRY_FIRST``, which + means "try UDP but fallback to TCP if needed", and + ``dns.UDPMode.ONLY``, which means "try UDP and raise + ``dns.xfr.UseTCP`` if it does not succeed. + + Raises on errors. + """ + if query is None: + (query, serial) = dns.xfr.make_query(txn_manager) + else: + serial = dns.xfr.extract_serial_from_query(query) + + (af, destination, source) = _destination_and_source( + where, port, source, source_port + ) + (_, expiration) = _compute_times(lifetime) + if query.question[0].rdtype == dns.rdatatype.IXFR and udp_mode != UDPMode.NEVER: + with _make_socket(af, socket.SOCK_DGRAM, source) as s: + _connect(s, destination, expiration) + try: + for _ in _inbound_xfr( + txn_manager, s, query, serial, timeout, expiration + ): + pass + return + except dns.xfr.UseTCP: + if udp_mode == UDPMode.ONLY: + raise + + with _make_socket(af, socket.SOCK_STREAM, source) as s: + _connect(s, destination, expiration) + for _ in _inbound_xfr(txn_manager, s, query, serial, timeout, expiration): + pass diff --git a/vllm/lib/python3.10/site-packages/dns/rdataclass.py b/vllm/lib/python3.10/site-packages/dns/rdataclass.py new file mode 100644 index 0000000000000000000000000000000000000000..89b85a79c27ca8eb40bd85d65a17b6280fc50a43 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/rdataclass.py @@ -0,0 +1,118 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2001-2017 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""DNS Rdata Classes.""" + +import dns.enum +import dns.exception + + +class RdataClass(dns.enum.IntEnum): + """DNS Rdata Class""" + + RESERVED0 = 0 + IN = 1 + INTERNET = IN + CH = 3 + CHAOS = CH + HS = 4 + HESIOD = HS + NONE = 254 + ANY = 255 + + @classmethod + def _maximum(cls): + return 65535 + + @classmethod + def _short_name(cls): + return "class" + + @classmethod + def _prefix(cls): + return "CLASS" + + @classmethod + def _unknown_exception_class(cls): + return UnknownRdataclass + + +_metaclasses = {RdataClass.NONE, RdataClass.ANY} + + +class UnknownRdataclass(dns.exception.DNSException): + """A DNS class is unknown.""" + + +def from_text(text: str) -> RdataClass: + """Convert text into a DNS rdata class value. + + The input text can be a defined DNS RR class mnemonic or + instance of the DNS generic class syntax. + + For example, "IN" and "CLASS1" will both result in a value of 1. + + Raises ``dns.rdatatype.UnknownRdataclass`` if the class is unknown. + + Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535. + + Returns a ``dns.rdataclass.RdataClass``. + """ + + return RdataClass.from_text(text) + + +def to_text(value: RdataClass) -> str: + """Convert a DNS rdata class value to text. + + If the value has a known mnemonic, it will be used, otherwise the + DNS generic class syntax will be used. + + Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535. + + Returns a ``str``. 
+ """ + + return RdataClass.to_text(value) + + +def is_metaclass(rdclass: RdataClass) -> bool: + """True if the specified class is a metaclass. + + The currently defined metaclasses are ANY and NONE. + + *rdclass* is a ``dns.rdataclass.RdataClass``. + """ + + if rdclass in _metaclasses: + return True + return False + + +### BEGIN generated RdataClass constants + +RESERVED0 = RdataClass.RESERVED0 +IN = RdataClass.IN +INTERNET = RdataClass.INTERNET +CH = RdataClass.CH +CHAOS = RdataClass.CHAOS +HS = RdataClass.HS +HESIOD = RdataClass.HESIOD +NONE = RdataClass.NONE +ANY = RdataClass.ANY + +### END generated RdataClass constants diff --git a/vllm/lib/python3.10/site-packages/dns/rdataset.py b/vllm/lib/python3.10/site-packages/dns/rdataset.py new file mode 100644 index 0000000000000000000000000000000000000000..39cab2365aee8757615cfe0113c53857cf29d184 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/rdataset.py @@ -0,0 +1,512 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2001-2017 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" + +import io +import random +import struct +from typing import Any, Collection, Dict, List, Optional, Union, cast + +import dns.exception +import dns.immutable +import dns.name +import dns.rdata +import dns.rdataclass +import dns.rdatatype +import dns.renderer +import dns.set +import dns.ttl + +# define SimpleSet here for backwards compatibility +SimpleSet = dns.set.Set + + +class DifferingCovers(dns.exception.DNSException): + """An attempt was made to add a DNS SIG/RRSIG whose covered type + is not the same as that of the other rdatas in the rdataset.""" + + +class IncompatibleTypes(dns.exception.DNSException): + """An attempt was made to add DNS RR data of an incompatible type.""" + + +class Rdataset(dns.set.Set): + """A DNS rdataset.""" + + __slots__ = ["rdclass", "rdtype", "covers", "ttl"] + + def __init__( + self, + rdclass: dns.rdataclass.RdataClass, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType = dns.rdatatype.NONE, + ttl: int = 0, + ): + """Create a new rdataset of the specified class and type. + + *rdclass*, a ``dns.rdataclass.RdataClass``, the rdataclass. + + *rdtype*, an ``dns.rdatatype.RdataType``, the rdatatype. + + *covers*, an ``dns.rdatatype.RdataType``, the covered rdatatype. + + *ttl*, an ``int``, the TTL. + """ + + super().__init__() + self.rdclass = rdclass + self.rdtype: dns.rdatatype.RdataType = rdtype + self.covers: dns.rdatatype.RdataType = covers + self.ttl = ttl + + def _clone(self): + obj = super()._clone() + obj.rdclass = self.rdclass + obj.rdtype = self.rdtype + obj.covers = self.covers + obj.ttl = self.ttl + return obj + + def update_ttl(self, ttl: int) -> None: + """Perform TTL minimization. + + Set the TTL of the rdataset to be the lesser of the set's current + TTL or the specified TTL. If the set contains no rdatas, set the TTL + to the specified TTL. + + *ttl*, an ``int`` or ``str``. 
+ """ + ttl = dns.ttl.make(ttl) + if len(self) == 0: + self.ttl = ttl + elif ttl < self.ttl: + self.ttl = ttl + + def add( # pylint: disable=arguments-differ,arguments-renamed + self, rd: dns.rdata.Rdata, ttl: Optional[int] = None + ) -> None: + """Add the specified rdata to the rdataset. + + If the optional *ttl* parameter is supplied, then + ``self.update_ttl(ttl)`` will be called prior to adding the rdata. + + *rd*, a ``dns.rdata.Rdata``, the rdata + + *ttl*, an ``int``, the TTL. + + Raises ``dns.rdataset.IncompatibleTypes`` if the type and class + do not match the type and class of the rdataset. + + Raises ``dns.rdataset.DifferingCovers`` if the type is a signature + type and the covered type does not match that of the rdataset. + """ + + # + # If we're adding a signature, do some special handling to + # check that the signature covers the same type as the + # other rdatas in this rdataset. If this is the first rdata + # in the set, initialize the covers field. + # + if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: + raise IncompatibleTypes + if ttl is not None: + self.update_ttl(ttl) + if self.rdtype == dns.rdatatype.RRSIG or self.rdtype == dns.rdatatype.SIG: + covers = rd.covers() + if len(self) == 0 and self.covers == dns.rdatatype.NONE: + self.covers = covers + elif self.covers != covers: + raise DifferingCovers + if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: + self.clear() + super().add(rd) + + def union_update(self, other): + self.update_ttl(other.ttl) + super().union_update(other) + + def intersection_update(self, other): + self.update_ttl(other.ttl) + super().intersection_update(other) + + def update(self, other): + """Add all rdatas in other to self. + + *other*, a ``dns.rdataset.Rdataset``, the rdataset from which + to update. + """ + + self.update_ttl(other.ttl) + super().update(other) + + def _rdata_repr(self): + def maybe_truncate(s): + if len(s) > 100: + return s[:100] + "..." 
+ return s + + return "[" + ", ".join(f"<{maybe_truncate(str(rr))}>" for rr in self) + "]" + + def __repr__(self): + if self.covers == 0: + ctext = "" + else: + ctext = "(" + dns.rdatatype.to_text(self.covers) + ")" + return ( + "" + ) + + def __str__(self): + return self.to_text() + + def __eq__(self, other): + if not isinstance(other, Rdataset): + return False + if ( + self.rdclass != other.rdclass + or self.rdtype != other.rdtype + or self.covers != other.covers + ): + return False + return super().__eq__(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_text( + self, + name: Optional[dns.name.Name] = None, + origin: Optional[dns.name.Name] = None, + relativize: bool = True, + override_rdclass: Optional[dns.rdataclass.RdataClass] = None, + want_comments: bool = False, + **kw: Dict[str, Any], + ) -> str: + """Convert the rdataset into DNS zone file format. + + See ``dns.name.Name.choose_relativity`` for more information + on how *origin* and *relativize* determine the way names + are emitted. + + Any additional keyword arguments are passed on to the rdata + ``to_text()`` method. + + *name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with + *name* as the owner name. + + *origin*, a ``dns.name.Name`` or ``None``, the origin for relative + names. + + *relativize*, a ``bool``. If ``True``, names will be relativized + to *origin*. + + *override_rdclass*, a ``dns.rdataclass.RdataClass`` or ``None``. + If not ``None``, use this class instead of the Rdataset's class. + + *want_comments*, a ``bool``. If ``True``, emit comments for rdata + which have them. The default is ``False``. 
+ """ + + if name is not None: + name = name.choose_relativity(origin, relativize) + ntext = str(name) + pad = " " + else: + ntext = "" + pad = "" + s = io.StringIO() + if override_rdclass is not None: + rdclass = override_rdclass + else: + rdclass = self.rdclass + if len(self) == 0: + # + # Empty rdatasets are used for the question section, and in + # some dynamic updates, so we don't need to print out the TTL + # (which is meaningless anyway). + # + s.write( + f"{ntext}{pad}{dns.rdataclass.to_text(rdclass)} " + f"{dns.rdatatype.to_text(self.rdtype)}\n" + ) + else: + for rd in self: + extra = "" + if want_comments: + if rd.rdcomment: + extra = f" ;{rd.rdcomment}" + s.write( + "%s%s%d %s %s %s%s\n" + % ( + ntext, + pad, + self.ttl, + dns.rdataclass.to_text(rdclass), + dns.rdatatype.to_text(self.rdtype), + rd.to_text(origin=origin, relativize=relativize, **kw), + extra, + ) + ) + # + # We strip off the final \n for the caller's convenience in printing + # + return s.getvalue()[:-1] + + def to_wire( + self, + name: dns.name.Name, + file: Any, + compress: Optional[dns.name.CompressType] = None, + origin: Optional[dns.name.Name] = None, + override_rdclass: Optional[dns.rdataclass.RdataClass] = None, + want_shuffle: bool = True, + ) -> int: + """Convert the rdataset to wire format. + + *name*, a ``dns.name.Name`` is the owner name to use. + + *file* is the file where the name is emitted (typically a + BytesIO file). + + *compress*, a ``dict``, is the compression table to use. If + ``None`` (the default), names will not be compressed. + + *origin* is a ``dns.name.Name`` or ``None``. If the name is + relative and origin is not ``None``, then *origin* will be appended + to it. + + *override_rdclass*, an ``int``, is used as the class instead of the + class of the rdataset. This is useful when rendering rdatasets + associated with dynamic updates. + + *want_shuffle*, a ``bool``. 
If ``True``, then the order of the + Rdatas within the Rdataset will be shuffled before rendering. + + Returns an ``int``, the number of records emitted. + """ + + if override_rdclass is not None: + rdclass = override_rdclass + want_shuffle = False + else: + rdclass = self.rdclass + if len(self) == 0: + name.to_wire(file, compress, origin) + file.write(struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)) + return 1 + else: + l: Union[Rdataset, List[dns.rdata.Rdata]] + if want_shuffle: + l = list(self) + random.shuffle(l) + else: + l = self + for rd in l: + name.to_wire(file, compress, origin) + file.write(struct.pack("!HHI", self.rdtype, rdclass, self.ttl)) + with dns.renderer.prefixed_length(file, 2): + rd.to_wire(file, compress, origin) + return len(self) + + def match( + self, + rdclass: dns.rdataclass.RdataClass, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType, + ) -> bool: + """Returns ``True`` if this rdataset matches the specified class, + type, and covers. + """ + if self.rdclass == rdclass and self.rdtype == rdtype and self.covers == covers: + return True + return False + + def processing_order(self) -> List[dns.rdata.Rdata]: + """Return rdatas in a valid processing order according to the type's + specification. For example, MX records are in preference order from + lowest to highest preferences, with items of the same preference + shuffled. + + For types that do not define a processing order, the rdatas are + simply shuffled. 
+ """ + if len(self) == 0: + return [] + else: + return self[0]._processing_order(iter(self)) + + +@dns.immutable.immutable +class ImmutableRdataset(Rdataset): # lgtm[py/missing-equals] + """An immutable DNS rdataset.""" + + _clone_class = Rdataset + + def __init__(self, rdataset: Rdataset): + """Create an immutable rdataset from the specified rdataset.""" + + super().__init__( + rdataset.rdclass, rdataset.rdtype, rdataset.covers, rdataset.ttl + ) + self.items = dns.immutable.Dict(rdataset.items) + + def update_ttl(self, ttl): + raise TypeError("immutable") + + def add(self, rd, ttl=None): + raise TypeError("immutable") + + def union_update(self, other): + raise TypeError("immutable") + + def intersection_update(self, other): + raise TypeError("immutable") + + def update(self, other): + raise TypeError("immutable") + + def __delitem__(self, i): + raise TypeError("immutable") + + # lgtm complains about these not raising ArithmeticError, but there is + # precedent for overrides of these methods in other classes to raise + # TypeError, and it seems like the better exception. 
+ + def __ior__(self, other): # lgtm[py/unexpected-raise-in-special-method] + raise TypeError("immutable") + + def __iand__(self, other): # lgtm[py/unexpected-raise-in-special-method] + raise TypeError("immutable") + + def __iadd__(self, other): # lgtm[py/unexpected-raise-in-special-method] + raise TypeError("immutable") + + def __isub__(self, other): # lgtm[py/unexpected-raise-in-special-method] + raise TypeError("immutable") + + def clear(self): + raise TypeError("immutable") + + def __copy__(self): + return ImmutableRdataset(super().copy()) + + def copy(self): + return ImmutableRdataset(super().copy()) + + def union(self, other): + return ImmutableRdataset(super().union(other)) + + def intersection(self, other): + return ImmutableRdataset(super().intersection(other)) + + def difference(self, other): + return ImmutableRdataset(super().difference(other)) + + def symmetric_difference(self, other): + return ImmutableRdataset(super().symmetric_difference(other)) + + +def from_text_list( + rdclass: Union[dns.rdataclass.RdataClass, str], + rdtype: Union[dns.rdatatype.RdataType, str], + ttl: int, + text_rdatas: Collection[str], + idna_codec: Optional[dns.name.IDNACodec] = None, + origin: Optional[dns.name.Name] = None, + relativize: bool = True, + relativize_to: Optional[dns.name.Name] = None, +) -> Rdataset: + """Create an rdataset with the specified class, type, and TTL, and with + the specified list of rdatas in text format. + + *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA + encoder/decoder to use; if ``None``, the default IDNA 2003 + encoder/decoder is used. + + *origin*, a ``dns.name.Name`` (or ``None``), the + origin to use for relative names. + + *relativize*, a ``bool``. If true, name will be relativized. + + *relativize_to*, a ``dns.name.Name`` (or ``None``), the origin to use + when relativizing names. If not set, the *origin* value will be used. + + Returns a ``dns.rdataset.Rdataset`` object. 
+ """ + + rdclass = dns.rdataclass.RdataClass.make(rdclass) + rdtype = dns.rdatatype.RdataType.make(rdtype) + r = Rdataset(rdclass, rdtype) + r.update_ttl(ttl) + for t in text_rdatas: + rd = dns.rdata.from_text( + r.rdclass, r.rdtype, t, origin, relativize, relativize_to, idna_codec + ) + r.add(rd) + return r + + +def from_text( + rdclass: Union[dns.rdataclass.RdataClass, str], + rdtype: Union[dns.rdatatype.RdataType, str], + ttl: int, + *text_rdatas: Any, +) -> Rdataset: + """Create an rdataset with the specified class, type, and TTL, and with + the specified rdatas in text format. + + Returns a ``dns.rdataset.Rdataset`` object. + """ + + return from_text_list(rdclass, rdtype, ttl, cast(Collection[str], text_rdatas)) + + +def from_rdata_list(ttl: int, rdatas: Collection[dns.rdata.Rdata]) -> Rdataset: + """Create an rdataset with the specified TTL, and with + the specified list of rdata objects. + + Returns a ``dns.rdataset.Rdataset`` object. + """ + + if len(rdatas) == 0: + raise ValueError("rdata list must not be empty") + r = None + for rd in rdatas: + if r is None: + r = Rdataset(rd.rdclass, rd.rdtype) + r.update_ttl(ttl) + r.add(rd) + assert r is not None + return r + + +def from_rdata(ttl: int, *rdatas: Any) -> Rdataset: + """Create an rdataset with the specified TTL, and with + the specified rdata objects. + + Returns a ``dns.rdataset.Rdataset`` object. + """ + + return from_rdata_list(ttl, cast(Collection[dns.rdata.Rdata], rdatas)) diff --git a/vllm/lib/python3.10/site-packages/dns/renderer.py b/vllm/lib/python3.10/site-packages/dns/renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..a77481f67c8cdab5205ce8453550eb6f02ec566c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/renderer.py @@ -0,0 +1,346 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2001-2017 Nominum, Inc. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""Help for building DNS wire format messages""" + +import contextlib +import io +import random +import struct +import time + +import dns.exception +import dns.tsig + +QUESTION = 0 +ANSWER = 1 +AUTHORITY = 2 +ADDITIONAL = 3 + + +@contextlib.contextmanager +def prefixed_length(output, length_length): + output.write(b"\00" * length_length) + start = output.tell() + yield + end = output.tell() + length = end - start + if length > 0: + try: + output.seek(start - length_length) + try: + output.write(length.to_bytes(length_length, "big")) + except OverflowError: + raise dns.exception.FormError + finally: + output.seek(end) + + +class Renderer: + """Helper class for building DNS wire-format messages. + + Most applications can use the higher-level L{dns.message.Message} + class and its to_wire() method to generate wire-format messages. + This class is for those applications which need finer control + over the generation of messages. 
+ + Typical use:: + + r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512) + r.add_question(qname, qtype, qclass) + r.add_rrset(dns.renderer.ANSWER, rrset_1) + r.add_rrset(dns.renderer.ANSWER, rrset_2) + r.add_rrset(dns.renderer.AUTHORITY, ns_rrset) + r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1) + r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2) + r.add_edns(0, 0, 4096) + r.write_header() + r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) + wire = r.get_wire() + + If padding is going to be used, then the OPT record MUST be + written after everything else in the additional section except for + the TSIG (if any). + + output, an io.BytesIO, where rendering is written + + id: the message id + + flags: the message flags + + max_size: the maximum size of the message + + origin: the origin to use when rendering relative names + + compress: the compression table + + section: an int, the section currently being rendered + + counts: list of the number of RRs in each section + + mac: the MAC of the rendered message (if TSIG was used) + """ + + def __init__(self, id=None, flags=0, max_size=65535, origin=None): + """Initialize a new renderer.""" + + self.output = io.BytesIO() + if id is None: + self.id = random.randint(0, 65535) + else: + self.id = id + self.flags = flags + self.max_size = max_size + self.origin = origin + self.compress = {} + self.section = QUESTION + self.counts = [0, 0, 0, 0] + self.output.write(b"\x00" * 12) + self.mac = "" + self.reserved = 0 + self.was_padded = False + + def _rollback(self, where): + """Truncate the output buffer at offset *where*, and remove any + compression table entries that pointed beyond the truncation + point. + """ + + self.output.seek(where) + self.output.truncate() + keys_to_delete = [] + for k, v in self.compress.items(): + if v >= where: + keys_to_delete.append(k) + for k in keys_to_delete: + del self.compress[k] + + def _set_section(self, section): + """Set the renderer's current section. 
+ + Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, + ADDITIONAL. Sections may be empty. + + Raises dns.exception.FormError if an attempt was made to set + a section value less than the current section. + """ + + if self.section != section: + if self.section > section: + raise dns.exception.FormError + self.section = section + + @contextlib.contextmanager + def _track_size(self): + start = self.output.tell() + yield start + if self.output.tell() > self.max_size: + self._rollback(start) + raise dns.exception.TooBig + + @contextlib.contextmanager + def _temporarily_seek_to(self, where): + current = self.output.tell() + try: + self.output.seek(where) + yield + finally: + self.output.seek(current) + + def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN): + """Add a question to the message.""" + + self._set_section(QUESTION) + with self._track_size(): + qname.to_wire(self.output, self.compress, self.origin) + self.output.write(struct.pack("!HH", rdtype, rdclass)) + self.counts[QUESTION] += 1 + + def add_rrset(self, section, rrset, **kw): + """Add the rrset to the specified section. + + Any keyword arguments are passed on to the rdataset's to_wire() + routine. + """ + + self._set_section(section) + with self._track_size(): + n = rrset.to_wire(self.output, self.compress, self.origin, **kw) + self.counts[section] += n + + def add_rdataset(self, section, name, rdataset, **kw): + """Add the rdataset to the specified section, using the specified + name as the owner name. + + Any keyword arguments are passed on to the rdataset's to_wire() + routine. + """ + + self._set_section(section) + with self._track_size(): + n = rdataset.to_wire(name, self.output, self.compress, self.origin, **kw) + self.counts[section] += n + + def add_opt(self, opt, pad=0, opt_size=0, tsig_size=0): + """Add *opt* to the additional section, applying padding if desired. The + padding will take the specified precomputed OPT size and TSIG size into + account. 
+ + Note that we don't have reliable way of knowing how big a GSS-TSIG digest + might be, so we we might not get an even multiple of the pad in that case.""" + if pad: + ttl = opt.ttl + assert opt_size >= 11 + opt_rdata = opt[0] + size_without_padding = self.output.tell() + opt_size + tsig_size + remainder = size_without_padding % pad + if remainder: + pad = b"\x00" * (pad - remainder) + else: + pad = b"" + options = list(opt_rdata.options) + options.append(dns.edns.GenericOption(dns.edns.OptionType.PADDING, pad)) + opt = dns.message.Message._make_opt(ttl, opt_rdata.rdclass, options) + self.was_padded = True + self.add_rrset(ADDITIONAL, opt) + + def add_edns(self, edns, ednsflags, payload, options=None): + """Add an EDNS OPT record to the message.""" + + # make sure the EDNS version in ednsflags agrees with edns + ednsflags &= 0xFF00FFFF + ednsflags |= edns << 16 + opt = dns.message.Message._make_opt(ednsflags, payload, options) + self.add_opt(opt) + + def add_tsig( + self, + keyname, + secret, + fudge, + id, + tsig_error, + other_data, + request_mac, + algorithm=dns.tsig.default_algorithm, + ): + """Add a TSIG signature to the message.""" + + s = self.output.getvalue() + + if isinstance(secret, dns.tsig.Key): + key = secret + else: + key = dns.tsig.Key(keyname, secret, algorithm) + tsig = dns.message.Message._make_tsig( + keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data + ) + (tsig, _) = dns.tsig.sign(s, key, tsig[0], int(time.time()), request_mac) + self._write_tsig(tsig, keyname) + + def add_multi_tsig( + self, + ctx, + keyname, + secret, + fudge, + id, + tsig_error, + other_data, + request_mac, + algorithm=dns.tsig.default_algorithm, + ): + """Add a TSIG signature to the message. Unlike add_tsig(), this can be + used for a series of consecutive DNS envelopes, e.g. for a zone + transfer over TCP [RFC2845, 4.4]. + + For the first message in the sequence, give ctx=None. 
For each + subsequent message, give the ctx that was returned from the + add_multi_tsig() call for the previous message.""" + + s = self.output.getvalue() + + if isinstance(secret, dns.tsig.Key): + key = secret + else: + key = dns.tsig.Key(keyname, secret, algorithm) + tsig = dns.message.Message._make_tsig( + keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data + ) + (tsig, ctx) = dns.tsig.sign( + s, key, tsig[0], int(time.time()), request_mac, ctx, True + ) + self._write_tsig(tsig, keyname) + return ctx + + def _write_tsig(self, tsig, keyname): + if self.was_padded: + compress = None + else: + compress = self.compress + self._set_section(ADDITIONAL) + with self._track_size(): + keyname.to_wire(self.output, compress, self.origin) + self.output.write( + struct.pack("!HHI", dns.rdatatype.TSIG, dns.rdataclass.ANY, 0) + ) + with prefixed_length(self.output, 2): + tsig.to_wire(self.output) + + self.counts[ADDITIONAL] += 1 + with self._temporarily_seek_to(10): + self.output.write(struct.pack("!H", self.counts[ADDITIONAL])) + + def write_header(self): + """Write the DNS message header. + + Writing the DNS message header is done after all sections + have been rendered, but before the optional TSIG signature + is added. 
+ """ + + with self._temporarily_seek_to(0): + self.output.write( + struct.pack( + "!HHHHHH", + self.id, + self.flags, + self.counts[0], + self.counts[1], + self.counts[2], + self.counts[3], + ) + ) + + def get_wire(self): + """Return the wire format message.""" + + return self.output.getvalue() + + def reserve(self, size: int) -> None: + """Reserve *size* bytes.""" + if size < 0: + raise ValueError("reserved amount must be non-negative") + if size > self.max_size: + raise ValueError("cannot reserve more than the maximum size") + self.reserved += size + self.max_size -= size + + def release_reserved(self) -> None: + """Release the reserved bytes.""" + self.max_size += self.reserved + self.reserved = 0 diff --git a/vllm/lib/python3.10/site-packages/dns/resolver.py b/vllm/lib/python3.10/site-packages/dns/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba76e31e6db99b47108609312ddfdd6e68dfa4f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/resolver.py @@ -0,0 +1,2053 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2017 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +"""DNS stub resolver.""" + +import contextlib +import random +import socket +import sys +import threading +import time +import warnings +from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union +from urllib.parse import urlparse + +import dns._ddr +import dns.edns +import dns.exception +import dns.flags +import dns.inet +import dns.ipv4 +import dns.ipv6 +import dns.message +import dns.name +import dns.rdata +import dns.nameserver +import dns.query +import dns.rcode +import dns.rdataclass +import dns.rdatatype +import dns.rdtypes.svcbbase +import dns.reversename +import dns.tsig + +if sys.platform == "win32": # pragma: no cover + import dns.win32util + + +class NXDOMAIN(dns.exception.DNSException): + """The DNS query name does not exist.""" + + supp_kwargs = {"qnames", "responses"} + fmt = None # we have our own __str__ implementation + + # pylint: disable=arguments-differ + + # We do this as otherwise mypy complains about unexpected keyword argument + # idna_exception + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def _check_kwargs(self, qnames, responses=None): + if not isinstance(qnames, (list, tuple, set)): + raise AttributeError("qnames must be a list, tuple or set") + if len(qnames) == 0: + raise AttributeError("qnames must contain at least one element") + if responses is None: + responses = {} + elif not isinstance(responses, dict): + raise AttributeError("responses must be a dict(qname=response)") + kwargs = dict(qnames=qnames, responses=responses) + return kwargs + + def __str__(self) -> str: + if "qnames" not in self.kwargs: + return super().__str__() + qnames = self.kwargs["qnames"] + if len(qnames) > 1: + msg = "None of DNS query names exist" + else: + msg = "The DNS query name does not exist" + qnames = ", ".join(map(str, qnames)) + return f"{msg}: {qnames}" + + @property + def canonical_name(self): + """Return the unresolved canonical name.""" + if "qnames" not in self.kwargs: + raise 
TypeError("parametrized exception required") + for qname in self.kwargs["qnames"]: + response = self.kwargs["responses"][qname] + try: + cname = response.canonical_name() + if cname != qname: + return cname + except Exception: # pragma: no cover + # We can just eat this exception as it means there was + # something wrong with the response. + pass + return self.kwargs["qnames"][0] + + def __add__(self, e_nx): + """Augment by results from another NXDOMAIN exception.""" + qnames0 = list(self.kwargs.get("qnames", [])) + responses0 = dict(self.kwargs.get("responses", {})) + responses1 = e_nx.kwargs.get("responses", {}) + for qname1 in e_nx.kwargs.get("qnames", []): + if qname1 not in qnames0: + qnames0.append(qname1) + if qname1 in responses1: + responses0[qname1] = responses1[qname1] + return NXDOMAIN(qnames=qnames0, responses=responses0) + + def qnames(self): + """All of the names that were tried. + + Returns a list of ``dns.name.Name``. + """ + return self.kwargs["qnames"] + + def responses(self): + """A map from queried names to their NXDOMAIN responses. + + Returns a dict mapping a ``dns.name.Name`` to a + ``dns.message.Message``. + """ + return self.kwargs["responses"] + + def response(self, qname): + """The response for query *qname*. + + Returns a ``dns.message.Message``. + """ + return self.kwargs["responses"][qname] + + +class YXDOMAIN(dns.exception.DNSException): + """The DNS query name is too long after DNAME substitution.""" + + +ErrorTuple = Tuple[ + Optional[str], + bool, + int, + Union[Exception, str], + Optional[dns.message.Message], +] + + +def _errors_to_text(errors: List[ErrorTuple]) -> List[str]: + """Turn a resolution errors trace into a list of text.""" + texts = [] + for err in errors: + texts.append(f"Server {err[0]} answered {err[3]}") + return texts + + +class LifetimeTimeout(dns.exception.Timeout): + """The resolution lifetime expired.""" + + msg = "The resolution lifetime expired." 
    fmt = f"{msg[:-1]} after {{timeout:.3f}} seconds: {{errors}}"
    supp_kwargs = {"timeout", "errors"}

    # We do this as otherwise mypy complains about unexpected keyword argument
    # idna_exception
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _fmt_kwargs(self, **kwargs):
        srv_msgs = _errors_to_text(kwargs["errors"])
        return super()._fmt_kwargs(
            timeout=kwargs["timeout"], errors="; ".join(srv_msgs)
        )


# We added more detail to resolution timeouts, but they are still
# subclasses of dns.exception.Timeout for backwards compatibility. We also
# keep dns.resolver.Timeout defined for backwards compatibility.
Timeout = LifetimeTimeout


class NoAnswer(dns.exception.DNSException):
    """The DNS response does not contain an answer to the question."""

    fmt = "The DNS response does not contain an answer to the question: {query}"
    supp_kwargs = {"response"}

    # We do this as otherwise mypy complains about unexpected keyword argument
    # idna_exception
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _fmt_kwargs(self, **kwargs):
        return super()._fmt_kwargs(query=kwargs["response"].question)

    def response(self):
        # The dns.message.Message that lacked an answer.
        return self.kwargs["response"]


class NoNameservers(dns.exception.DNSException):
    """All nameservers failed to answer the query.

    errors: list of servers and respective errors
    The type of errors is
    [(server IP address, any object convertible to string)].
    A non-empty errors list adds an explanatory message to the
    exception text.
    """

    msg = "All nameservers failed to answer the query."
+ """ + + def __init__( + self, + qname: dns.name.Name, + rdtype: dns.rdatatype.RdataType, + rdclass: dns.rdataclass.RdataClass, + response: dns.message.QueryMessage, + nameserver: Optional[str] = None, + port: Optional[int] = None, + ) -> None: + self.qname = qname + self.rdtype = rdtype + self.rdclass = rdclass + self.response = response + self.nameserver = nameserver + self.port = port + self.chaining_result = response.resolve_chaining() + # Copy some attributes out of chaining_result for backwards + # compatibility and convenience. + self.canonical_name = self.chaining_result.canonical_name + self.rrset = self.chaining_result.answer + self.expiration = time.time() + self.chaining_result.minimum_ttl + + def __getattr__(self, attr): # pragma: no cover + if attr == "name": + return self.rrset.name + elif attr == "ttl": + return self.rrset.ttl + elif attr == "covers": + return self.rrset.covers + elif attr == "rdclass": + return self.rrset.rdclass + elif attr == "rdtype": + return self.rrset.rdtype + else: + raise AttributeError(attr) + + def __len__(self) -> int: + return self.rrset and len(self.rrset) or 0 + + def __iter__(self) -> Iterator[dns.rdata.Rdata]: + return self.rrset and iter(self.rrset) or iter(tuple()) + + def __getitem__(self, i): + if self.rrset is None: + raise IndexError + return self.rrset[i] + + def __delitem__(self, i): + if self.rrset is None: + raise IndexError + del self.rrset[i] + + +class Answers(dict): + """A dict of DNS stub resolver answers, indexed by type.""" + + +class HostAnswers(Answers): + """A dict of DNS stub resolver answers to a host name lookup, indexed by + type. 
+ """ + + @classmethod + def make( + cls, + v6: Optional[Answer] = None, + v4: Optional[Answer] = None, + add_empty: bool = True, + ) -> "HostAnswers": + answers = HostAnswers() + if v6 is not None and (add_empty or v6.rrset): + answers[dns.rdatatype.AAAA] = v6 + if v4 is not None and (add_empty or v4.rrset): + answers[dns.rdatatype.A] = v4 + return answers + + # Returns pairs of (address, family) from this result, potentially + # filtering by address family. + def addresses_and_families( + self, family: int = socket.AF_UNSPEC + ) -> Iterator[Tuple[str, int]]: + if family == socket.AF_UNSPEC: + yield from self.addresses_and_families(socket.AF_INET6) + yield from self.addresses_and_families(socket.AF_INET) + return + elif family == socket.AF_INET6: + answer = self.get(dns.rdatatype.AAAA) + elif family == socket.AF_INET: + answer = self.get(dns.rdatatype.A) + else: # pragma: no cover + raise NotImplementedError(f"unknown address family {family}") + if answer: + for rdata in answer: + yield (rdata.address, family) + + # Returns addresses from this result, potentially filtering by + # address family. + def addresses(self, family: int = socket.AF_UNSPEC) -> Iterator[str]: + return (pair[0] for pair in self.addresses_and_families(family)) + + # Returns the canonical name from this result. 
    def canonical_name(self) -> dns.name.Name:
        # Prefers the AAAA answer's canonical name, falling back to A.
        # NOTE(review): raises AttributeError if neither answer is present —
        # presumably callers guarantee at least one; confirm against callers.
        answer = self.get(dns.rdatatype.AAAA, self.get(dns.rdatatype.A))
        return answer.canonical_name


class CacheStatistics:
    """Cache Statistics"""

    def __init__(self, hits: int = 0, misses: int = 0) -> None:
        self.hits = hits
        self.misses = misses

    def reset(self) -> None:
        self.hits = 0
        self.misses = 0

    def clone(self) -> "CacheStatistics":
        return CacheStatistics(self.hits, self.misses)


class CacheBase:
    # Common lock + statistics plumbing shared by Cache and LRUCache.
    def __init__(self) -> None:
        self.lock = threading.Lock()
        self.statistics = CacheStatistics()

    def reset_statistics(self) -> None:
        """Reset all statistics to zero."""
        with self.lock:
            self.statistics.reset()

    def hits(self) -> int:
        """How many hits has the cache had?"""
        with self.lock:
            return self.statistics.hits

    def misses(self) -> int:
        """How many misses has the cache had?"""
        with self.lock:
            return self.statistics.misses

    def get_statistics_snapshot(self) -> CacheStatistics:
        """Return a consistent snapshot of all the statistics.

        If running with multiple threads, it's better to take a
        snapshot than to call statistics methods such as hits() and
        misses() individually.
        """
        with self.lock:
            return self.statistics.clone()


CacheKey = Tuple[dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass]


class Cache(CacheBase):
    """Simple thread-safe DNS answer cache."""

    def __init__(self, cleaning_interval: float = 300.0) -> None:
        """*cleaning_interval*, a ``float`` is the number of seconds between
        periodic cleanings.
        """

        super().__init__()
        self.data: Dict[CacheKey, Answer] = {}
        self.cleaning_interval = cleaning_interval
        self.next_cleaning: float = time.time() + self.cleaning_interval

    def _maybe_clean(self) -> None:
        """Clean the cache if it's time to do so."""
        # Cleaning is opportunistic: it only happens inside get()/put(),
        # which already hold self.lock.

        now = time.time()
        if self.next_cleaning <= now:
            keys_to_delete = []
            for k, v in self.data.items():
                if v.expiration <= now:
                    keys_to_delete.append(k)
            for k in keys_to_delete:
                del self.data[k]
            now = time.time()
            self.next_cleaning = now + self.cleaning_interval

    def get(self, key: CacheKey) -> Optional[Answer]:
        """Get the answer associated with *key*.

        Returns None if no answer is cached for the key.

        *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
        tuple whose values are the query name, rdtype, and rdclass respectively.

        Returns a ``dns.resolver.Answer`` or ``None``.
        """

        with self.lock:
            self._maybe_clean()
            v = self.data.get(key)
            # An expired-but-not-yet-cleaned entry also counts as a miss.
            if v is None or v.expiration <= time.time():
                self.statistics.misses += 1
                return None
            self.statistics.hits += 1
            return v

    def put(self, key: CacheKey, value: Answer) -> None:
        """Associate key and value in the cache.

        *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
        tuple whose values are the query name, rdtype, and rdclass respectively.

        *value*, a ``dns.resolver.Answer``, the answer.
        """

        with self.lock:
            self._maybe_clean()
            self.data[key] = value

    def flush(self, key: Optional[CacheKey] = None) -> None:
        """Flush the cache.

        If *key* is not ``None``, only that item is flushed.  Otherwise the entire cache
        is flushed.

        *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
        tuple whose values are the query name, rdtype, and rdclass respectively.
+ """ + + with self.lock: + if key is not None: + if key in self.data: + del self.data[key] + else: + self.data = {} + self.next_cleaning = time.time() + self.cleaning_interval + + +class LRUCacheNode: + """LRUCache node.""" + + def __init__(self, key, value): + self.key = key + self.value = value + self.hits = 0 + self.prev = self + self.next = self + + def link_after(self, node: "LRUCacheNode") -> None: + self.prev = node + self.next = node.next + node.next.prev = self + node.next = self + + def unlink(self) -> None: + self.next.prev = self.prev + self.prev.next = self.next + + +class LRUCache(CacheBase): + """Thread-safe, bounded, least-recently-used DNS answer cache. + + This cache is better than the simple cache (above) if you're + running a web crawler or other process that does a lot of + resolutions. The LRUCache has a maximum number of nodes, and when + it is full, the least-recently used node is removed to make space + for a new one. + """ + + def __init__(self, max_size: int = 100000) -> None: + """*max_size*, an ``int``, is the maximum number of nodes to cache; + it must be greater than 0. + """ + + super().__init__() + self.data: Dict[CacheKey, LRUCacheNode] = {} + self.set_max_size(max_size) + self.sentinel: LRUCacheNode = LRUCacheNode(None, None) + self.sentinel.prev = self.sentinel + self.sentinel.next = self.sentinel + + def set_max_size(self, max_size: int) -> None: + if max_size < 1: + max_size = 1 + self.max_size = max_size + + def get(self, key: CacheKey) -> Optional[Answer]: + """Get the answer associated with *key*. + + Returns None if no answer is cached for the key. + + *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)`` + tuple whose values are the query name, rdtype, and rdclass respectively. + + Returns a ``dns.resolver.Answer`` or ``None``. 
+ """ + + with self.lock: + node = self.data.get(key) + if node is None: + self.statistics.misses += 1 + return None + # Unlink because we're either going to move the node to the front + # of the LRU list or we're going to free it. + node.unlink() + if node.value.expiration <= time.time(): + del self.data[node.key] + self.statistics.misses += 1 + return None + node.link_after(self.sentinel) + self.statistics.hits += 1 + node.hits += 1 + return node.value + + def get_hits_for_key(self, key: CacheKey) -> int: + """Return the number of cache hits associated with the specified key.""" + with self.lock: + node = self.data.get(key) + if node is None or node.value.expiration <= time.time(): + return 0 + else: + return node.hits + + def put(self, key: CacheKey, value: Answer) -> None: + """Associate key and value in the cache. + + *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)`` + tuple whose values are the query name, rdtype, and rdclass respectively. + + *value*, a ``dns.resolver.Answer``, the answer. + """ + + with self.lock: + node = self.data.get(key) + if node is not None: + node.unlink() + del self.data[node.key] + while len(self.data) >= self.max_size: + gnode = self.sentinel.prev + gnode.unlink() + del self.data[gnode.key] + node = LRUCacheNode(key, value) + node.link_after(self.sentinel) + self.data[key] = node + + def flush(self, key: Optional[CacheKey] = None) -> None: + """Flush the cache. + + If *key* is not ``None``, only that item is flushed. Otherwise the entire cache + is flushed. + + *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)`` + tuple whose values are the query name, rdtype, and rdclass respectively. 
+ """ + + with self.lock: + if key is not None: + node = self.data.get(key) + if node is not None: + node.unlink() + del self.data[node.key] + else: + gnode = self.sentinel.next + while gnode != self.sentinel: + next = gnode.next + gnode.unlink() + gnode = next + self.data = {} + + +class _Resolution: + """Helper class for dns.resolver.Resolver.resolve(). + + All of the "business logic" of resolution is encapsulated in this + class, allowing us to have multiple resolve() implementations + using different I/O schemes without copying all of the + complicated logic. + + This class is a "friend" to dns.resolver.Resolver and manipulates + resolver data structures directly. + """ + + def __init__( + self, + resolver: "BaseResolver", + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + rdclass: Union[dns.rdataclass.RdataClass, str], + tcp: bool, + raise_on_no_answer: bool, + search: Optional[bool], + ) -> None: + if isinstance(qname, str): + qname = dns.name.from_text(qname, None) + rdtype = dns.rdatatype.RdataType.make(rdtype) + if dns.rdatatype.is_metatype(rdtype): + raise NoMetaqueries + rdclass = dns.rdataclass.RdataClass.make(rdclass) + if dns.rdataclass.is_metaclass(rdclass): + raise NoMetaqueries + self.resolver = resolver + self.qnames_to_try = resolver._get_qnames_to_try(qname, search) + self.qnames = self.qnames_to_try[:] + self.rdtype = rdtype + self.rdclass = rdclass + self.tcp = tcp + self.raise_on_no_answer = raise_on_no_answer + self.nxdomain_responses: Dict[dns.name.Name, dns.message.QueryMessage] = {} + # Initialize other things to help analysis tools + self.qname = dns.name.empty + self.nameservers: List[dns.nameserver.Nameserver] = [] + self.current_nameservers: List[dns.nameserver.Nameserver] = [] + self.errors: List[ErrorTuple] = [] + self.nameserver: Optional[dns.nameserver.Nameserver] = None + self.tcp_attempt = False + self.retry_with_tcp = False + self.request: Optional[dns.message.QueryMessage] = None + 
self.backoff = 0.0 + + def next_request( + self, + ) -> Tuple[Optional[dns.message.QueryMessage], Optional[Answer]]: + """Get the next request to send, and check the cache. + + Returns a (request, answer) tuple. At most one of request or + answer will not be None. + """ + + # We return a tuple instead of Union[Message,Answer] as it lets + # the caller avoid isinstance(). + + while len(self.qnames) > 0: + self.qname = self.qnames.pop(0) + + # Do we know the answer? + if self.resolver.cache: + answer = self.resolver.cache.get( + (self.qname, self.rdtype, self.rdclass) + ) + if answer is not None: + if answer.rrset is None and self.raise_on_no_answer: + raise NoAnswer(response=answer.response) + else: + return (None, answer) + answer = self.resolver.cache.get( + (self.qname, dns.rdatatype.ANY, self.rdclass) + ) + if answer is not None and answer.response.rcode() == dns.rcode.NXDOMAIN: + # cached NXDOMAIN; record it and continue to next + # name. + self.nxdomain_responses[self.qname] = answer.response + continue + + # Build the request + request = dns.message.make_query(self.qname, self.rdtype, self.rdclass) + if self.resolver.keyname is not None: + request.use_tsig( + self.resolver.keyring, + self.resolver.keyname, + algorithm=self.resolver.keyalgorithm, + ) + request.use_edns( + self.resolver.edns, + self.resolver.ednsflags, + self.resolver.payload, + options=self.resolver.ednsoptions, + ) + if self.resolver.flags is not None: + request.flags = self.resolver.flags + + self.nameservers = self.resolver._enrich_nameservers( + self.resolver._nameservers, + self.resolver.nameserver_ports, + self.resolver.port, + ) + if self.resolver.rotate: + random.shuffle(self.nameservers) + self.current_nameservers = self.nameservers[:] + self.errors = [] + self.nameserver = None + self.tcp_attempt = False + self.retry_with_tcp = False + self.request = request + self.backoff = 0.10 + + return (request, None) + + # + # We've tried everything and only gotten NXDOMAINs. 
        # (We know it's only NXDOMAINs as anything else would have
        # returned before now.)
        #
        raise NXDOMAIN(qnames=self.qnames_to_try, responses=self.nxdomain_responses)

    def next_nameserver(self) -> Tuple[dns.nameserver.Nameserver, bool, float]:
        # Returns (nameserver, tcp, backoff): the server to try next,
        # whether to use TCP, and how long the caller should sleep first.
        if self.retry_with_tcp:
            # Immediate retry of the same server over TCP after truncation.
            assert self.nameserver is not None
            assert not self.nameserver.is_always_max_size()
            self.tcp_attempt = True
            self.retry_with_tcp = False
            return (self.nameserver, True, 0)

        backoff = 0.0
        if not self.current_nameservers:
            if len(self.nameservers) == 0:
                # Out of things to try!
                raise NoNameservers(request=self.request, errors=self.errors)
            # Start another pass over the (surviving) servers, backing off
            # with exponential doubling capped at 2 seconds.
            self.current_nameservers = self.nameservers[:]
            backoff = self.backoff
            self.backoff = min(self.backoff * 2, 2)

        self.nameserver = self.current_nameservers.pop(0)
        self.tcp_attempt = self.tcp or self.nameserver.is_always_max_size()
        return (self.nameserver, self.tcp_attempt, backoff)

    def query_result(
        self, response: Optional[dns.message.Message], ex: Optional[Exception]
    ) -> Tuple[Optional[Answer], bool]:
        #
        # returns an (answer: Answer, end_loop: bool) tuple.
        #
        assert self.nameserver is not None
        if ex:
            # Exception during I/O or from_wire()
            assert response is None
            self.errors.append(
                (
                    str(self.nameserver),
                    self.tcp_attempt,
                    self.nameserver.answer_port(),
                    ex,
                    response,
                )
            )
            if (
                isinstance(ex, dns.exception.FormError)
                or isinstance(ex, EOFError)
                or isinstance(ex, OSError)
                or isinstance(ex, NotImplementedError)
            ):
                # This nameserver is no good, take it out of the mix.
                self.nameservers.remove(self.nameserver)
            elif isinstance(ex, dns.message.Truncated):
                if self.tcp_attempt:
                    # Truncation with TCP is no good!
                    self.nameservers.remove(self.nameserver)
                else:
                    self.retry_with_tcp = True
            return (None, False)
        # We got an answer!
        assert response is not None
        assert isinstance(response, dns.message.QueryMessage)
        rcode = response.rcode()
        if rcode == dns.rcode.NOERROR:
            try:
                answer = Answer(
                    self.qname,
                    self.rdtype,
                    self.rdclass,
                    response,
                    self.nameserver.answer_nameserver(),
                    self.nameserver.answer_port(),
                )
            except Exception as e:
                # Answer construction (chaining resolution) failed, so the
                # response is malformed from our point of view.
                self.errors.append(
                    (
                        str(self.nameserver),
                        self.tcp_attempt,
                        self.nameserver.answer_port(),
                        e,
                        response,
                    )
                )
                # The nameserver is no good, take it out of the mix.
                self.nameservers.remove(self.nameserver)
                return (None, False)
            if self.resolver.cache:
                self.resolver.cache.put((self.qname, self.rdtype, self.rdclass), answer)
            if answer.rrset is None and self.raise_on_no_answer:
                raise NoAnswer(response=answer.response)
            return (answer, True)
        elif rcode == dns.rcode.NXDOMAIN:
            # Further validate the response by making an Answer, even
            # if we aren't going to cache it.
            try:
                answer = Answer(
                    self.qname, dns.rdatatype.ANY, dns.rdataclass.IN, response
                )
            except Exception as e:
                self.errors.append(
                    (
                        str(self.nameserver),
                        self.tcp_attempt,
                        self.nameserver.answer_port(),
                        e,
                        response,
                    )
                )
                # The nameserver is no good, take it out of the mix.
                self.nameservers.remove(self.nameserver)
                return (None, False)
            self.nxdomain_responses[self.qname] = response
            if self.resolver.cache:
                # Cache NXDOMAIN under rdtype ANY; next_request() checks
                # the same key.
                self.resolver.cache.put(
                    (self.qname, dns.rdatatype.ANY, self.rdclass), answer
                )
            # Make next_nameserver() return None, so caller breaks its
            # inner loop and calls next_request().
            return (None, True)
        elif rcode == dns.rcode.YXDOMAIN:
            yex = YXDOMAIN()
            self.errors.append(
                (
                    str(self.nameserver),
                    self.tcp_attempt,
                    self.nameserver.answer_port(),
                    yex,
                    response,
                )
            )
            raise yex
        else:
            #
            # We got a response, but we're not happy with the
            # rcode in it.
            #
            # SERVFAIL keeps the server in the rotation only when
            # retry_servfail is enabled; any other bad rcode removes it.
            if rcode != dns.rcode.SERVFAIL or not self.resolver.retry_servfail:
                self.nameservers.remove(self.nameserver)
            self.errors.append(
                (
                    str(self.nameserver),
                    self.tcp_attempt,
                    self.nameserver.answer_port(),
                    dns.rcode.to_text(rcode),
                    response,
                )
            )
            return (None, False)


class BaseResolver:
    """DNS stub resolver."""

    # We initialize in reset()
    #
    # pylint: disable=attribute-defined-outside-init

    domain: dns.name.Name
    nameserver_ports: Dict[str, int]
    port: int
    search: List[dns.name.Name]
    use_search_by_default: bool
    timeout: float
    lifetime: float
    keyring: Optional[Any]
    keyname: Optional[Union[dns.name.Name, str]]
    keyalgorithm: Union[dns.name.Name, str]
    edns: int
    ednsflags: int
    ednsoptions: Optional[List[dns.edns.Option]]
    payload: int
    cache: Any
    flags: Optional[int]
    retry_servfail: bool
    rotate: bool
    ndots: Optional[int]
    _nameservers: Sequence[Union[str, dns.nameserver.Nameserver]]

    def __init__(
        self, filename: str = "/etc/resolv.conf", configure: bool = True
    ) -> None:
        """*filename*, a ``str`` or file object, specifying a file
        in standard /etc/resolv.conf format.  This parameter is meaningful
        only when *configure* is true and the platform is POSIX.

        *configure*, a ``bool``.  If True (the default), the resolver
        instance is configured in the normal fashion for the operating
        system the resolver is running on.  (I.e. by reading a
        /etc/resolv.conf file on POSIX systems and from the registry
        on Windows systems.)
        """

        self.reset()
        if configure:
            if sys.platform == "win32":  # pragma: no cover
                self.read_registry()
            elif filename:
                self.read_resolv_conf(filename)

    def reset(self) -> None:
        """Reset all resolver configuration to the defaults."""

        # Default domain is the host name minus its first label.
        self.domain = dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
        if len(self.domain) == 0:  # pragma: no cover
            self.domain = dns.name.root
        self._nameservers = []
        self.nameserver_ports = {}
        self.port = 53
        self.search = []
        self.use_search_by_default = False
        self.timeout = 2.0  # per-attempt timeout (seconds)
        self.lifetime = 5.0  # total resolution budget (seconds)
        self.keyring = None
        self.keyname = None
        self.keyalgorithm = dns.tsig.default_algorithm
        self.edns = -1
        self.ednsflags = 0
        self.ednsoptions = None
        self.payload = 0
        self.cache = None
        self.flags = None
        self.retry_servfail = False
        self.rotate = False
        self.ndots = None

    def read_resolv_conf(self, f: Any) -> None:
        """Process *f* as a file in the /etc/resolv.conf format.  If f is
        a ``str``, it is used as the name of the file to open; otherwise it
        is treated as the file itself.

        Interprets the following items:

        - nameserver - name server IP address

        - domain - local domain name

        - search - search list for host-name lookup

        - options - supported options are rotate, timeout, edns0, and ndots

        """

        nameservers = []
        if isinstance(f, str):
            try:
                cm: contextlib.AbstractContextManager = open(f)
            except OSError:
                # /etc/resolv.conf doesn't exist, can't be read, etc.
                raise NoResolverConfiguration(f"cannot open {f}")
        else:
            # Caller supplied an open file object; don't close it for them.
            cm = contextlib.nullcontext(f)
        with cm as f:
            for l in f:
                if len(l) == 0 or l[0] == "#" or l[0] == ";":
                    continue
                tokens = l.split()

                # Any line containing less than 2 tokens is malformed
                if len(tokens) < 2:
                    continue

                if tokens[0] == "nameserver":
                    nameservers.append(tokens[1])
                elif tokens[0] == "domain":
                    self.domain = dns.name.from_text(tokens[1])
                    # domain and search are exclusive
                    self.search = []
                elif tokens[0] == "search":
                    # the last search wins
                    self.search = []
                    for suffix in tokens[1:]:
                        self.search.append(dns.name.from_text(suffix))
                    # We don't set domain as it is not used if
                    # len(self.search) > 0
                elif tokens[0] == "options":
                    for opt in tokens[1:]:
                        if opt == "rotate":
                            self.rotate = True
                        elif opt == "edns0":
                            self.use_edns()
                        elif "timeout" in opt:
                            # resolv.conf option form "timeout:N" (integer
                            # seconds); malformed values are ignored.
                            try:
                                self.timeout = int(opt.split(":")[1])
                            except (ValueError, IndexError):
                                pass
                        elif "ndots" in opt:
                            try:
                                self.ndots = int(opt.split(":")[1])
                            except (ValueError, IndexError):
                                pass
        if len(nameservers) == 0:
            raise NoResolverConfiguration("no nameservers")
        # Assigning directly instead of appending means we invoke the
        # setter logic, with additional checking and enrichment.
        self.nameservers = nameservers

    def read_registry(self) -> None:  # pragma: no cover
        """Extract resolver configuration from the Windows registry."""
        try:
            info = dns.win32util.get_dns_info()  # type: ignore
            if info.domain is not None:
                self.domain = info.domain
            self.nameservers = info.nameservers
            self.search = info.search
        except AttributeError:
            # dns.win32util was not imported (non-Windows platform).
            raise NotImplementedError

    def _compute_timeout(
        self,
        start: float,
        lifetime: Optional[float] = None,
        errors: Optional[List[ErrorTuple]] = None,
    ) -> float:
        # Returns the timeout for the next query attempt: the smaller of
        # the remaining lifetime and the per-attempt timeout.  Raises
        # LifetimeTimeout when the budget is exhausted.
        lifetime = self.lifetime if lifetime is None else lifetime
        now = time.time()
        duration = now - start
        if errors is None:
            errors = []
        if duration < 0:
            if duration < -1:
                # Time going backwards is bad.  Just give up.
                raise LifetimeTimeout(timeout=duration, errors=errors)
            else:
                # Time went backwards, but only a little.  This can
                # happen, e.g. under vmware with older linux kernels.
                # Pretend it didn't happen.
                duration = 0
        if duration >= lifetime:
            raise LifetimeTimeout(timeout=duration, errors=errors)
        return min(lifetime - duration, self.timeout)

    def _get_qnames_to_try(
        self, qname: dns.name.Name, search: Optional[bool]
    ) -> List[dns.name.Name]:
        # This is a separate method so we can unit test the search
        # rules without requiring the Internet.
        if search is None:
            search = self.use_search_by_default
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            abs_qname = qname.concatenate(dns.name.root)
            if search:
                if len(self.search) > 0:
                    # There is a search list, so use it exclusively
                    search_list = self.search[:]
                elif self.domain != dns.name.root and self.domain is not None:
                    # We have some notion of a domain that isn't the root, so
                    # use it as the search list.
                    search_list = [self.domain]
                else:
                    search_list = []
                # Figure out the effective ndots (default is 1)
                if self.ndots is None:
                    ndots = 1
                else:
                    ndots = self.ndots
                for suffix in search_list:
                    qnames_to_try.append(qname + suffix)
                if len(qname) > ndots:
                    # The name has at least ndots dots, so we should try an
                    # absolute query first.
                    qnames_to_try.insert(0, abs_qname)
                else:
                    # The name has less than ndots dots, so we should search
                    # first, then try the absolute name.
                    qnames_to_try.append(abs_qname)
            else:
                qnames_to_try.append(abs_qname)
        return qnames_to_try

    def use_tsig(
        self,
        keyring: Any,
        keyname: Optional[Union[dns.name.Name, str]] = None,
        algorithm: Union[dns.name.Name, str] = dns.tsig.default_algorithm,
    ) -> None:
        """Add a TSIG signature to each query.

        The parameters are passed to ``dns.message.Message.use_tsig()``;
        see its documentation for details.
        """

        self.keyring = keyring
        self.keyname = keyname
        self.keyalgorithm = algorithm

    def use_edns(
        self,
        edns: Optional[Union[int, bool]] = 0,
        ednsflags: int = 0,
        payload: int = dns.message.DEFAULT_EDNS_PAYLOAD,
        options: Optional[List[dns.edns.Option]] = None,
    ) -> None:
        """Configure EDNS behavior.

        *edns*, an ``int``, is the EDNS level to use.  Specifying
        ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case
        the other parameters are ignored.  Specifying ``True`` is
        equivalent to specifying 0, i.e. "use EDNS0".

        *ednsflags*, an ``int``, the EDNS flag values.

        *payload*, an ``int``, is the EDNS sender's payload field, which is the
        maximum size of UDP datagram the sender can handle.  I.e. how big
        a response to this message can be.

        *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS
        options.
        """

        if edns is None or edns is False:
            edns = -1
        elif edns is True:
            edns = 0
        self.edns = edns
        self.ednsflags = ednsflags
        self.payload = payload
        self.ednsoptions = options

    def set_flags(self, flags: int) -> None:
        """Overrides the default flags with your own.

        *flags*, an ``int``, the message flags to use.
        """

        self.flags = flags

    @classmethod
    def _enrich_nameservers(
        cls,
        nameservers: Sequence[Union[str, dns.nameserver.Nameserver]],
        nameserver_ports: Dict[str, int],
        default_port: int,
    ) -> List[dns.nameserver.Nameserver]:
        # Convert a heterogeneous list (Nameserver objects, IP address
        # strings, https URLs) into a uniform list of Nameserver objects.
        enriched_nameservers = []
        if isinstance(nameservers, list):
            for nameserver in nameservers:
                enriched_nameserver: dns.nameserver.Nameserver
                if isinstance(nameserver, dns.nameserver.Nameserver):
                    enriched_nameserver = nameserver
                elif dns.inet.is_address(nameserver):
                    # Plain IP address: classic DNS over port 53 (or the
                    # per-server/default port override).
                    port = nameserver_ports.get(nameserver, default_port)
                    enriched_nameserver = dns.nameserver.Do53Nameserver(
                        nameserver, port
                    )
                else:
                    # Anything else must be an https URL (DNS-over-HTTPS).
                    try:
                        if urlparse(nameserver).scheme != "https":
                            raise NotImplementedError
                    except Exception:
                        raise ValueError(
                            f"nameserver {nameserver} is not a "
                            "dns.nameserver.Nameserver instance or text form, "
                            "IP address, nor a valid https URL"
                        )
                    enriched_nameserver = dns.nameserver.DoHNameserver(nameserver)
                enriched_nameservers.append(enriched_nameserver)
        else:
            raise ValueError(
                f"nameservers must be a list or tuple (not a {type(nameservers)})"
            )
        return enriched_nameservers

    @property
    def nameservers(
        self,
    ) -> Sequence[Union[str, dns.nameserver.Nameserver]]:
        return self._nameservers

    @nameservers.setter
    def nameservers(
        self, nameservers: Sequence[Union[str, dns.nameserver.Nameserver]]
    ) -> None:
        """
        *nameservers*, a ``list`` of nameservers, where a nameserver is either
        a string interpretable as a nameserver, or a ``dns.nameserver.Nameserver``
        instance.

        Raises ``ValueError`` if *nameservers* is not a list of nameservers.
+ """ + # We just call _enrich_nameservers() for checking + self._enrich_nameservers(nameservers, self.nameserver_ports, self.port) + self._nameservers = nameservers + + +class Resolver(BaseResolver): + """DNS stub resolver.""" + + def resolve( + self, + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, + search: Optional[bool] = None, + ) -> Answer: # pylint: disable=arguments-differ + """Query nameservers to find the answer to the question. + + The *qname*, *rdtype*, and *rdclass* parameters may be objects + of the appropriate type, or strings that can be converted into objects + of the appropriate type. + + *qname*, a ``dns.name.Name`` or ``str``, the query name. + + *rdtype*, an ``int`` or ``str``, the query type. + + *rdclass*, an ``int`` or ``str``, the query class. + + *tcp*, a ``bool``. If ``True``, use TCP to make the query. + + *source*, a ``str`` or ``None``. If not ``None``, bind to this IP + address when making queries. + + *raise_on_no_answer*, a ``bool``. If ``True``, raise + ``dns.resolver.NoAnswer`` if there's no answer to the question. + + *source_port*, an ``int``, the port from which to send the message. + + *lifetime*, a ``float``, how many seconds a query should run + before timing out. + + *search*, a ``bool`` or ``None``, determines whether the + search list configured in the system's resolver configuration + are used for relative names, and whether the resolver's domain + may be added to relative names. The default is ``None``, + which causes the value of the resolver's + ``use_search_by_default`` attribute to be used. + + Raises ``dns.resolver.LifetimeTimeout`` if no answers could be found + in the specified lifetime. 
+ + Raises ``dns.resolver.NXDOMAIN`` if the query name does not exist. + + Raises ``dns.resolver.YXDOMAIN`` if the query name is too long after + DNAME substitution. + + Raises ``dns.resolver.NoAnswer`` if *raise_on_no_answer* is + ``True`` and the query name exists but has no RRset of the + desired type and class. + + Raises ``dns.resolver.NoNameservers`` if no non-broken + nameservers are available to answer the question. + + Returns a ``dns.resolver.Answer`` instance. + + """ + + resolution = _Resolution( + self, qname, rdtype, rdclass, tcp, raise_on_no_answer, search + ) + start = time.time() + while True: + (request, answer) = resolution.next_request() + # Note we need to say "if answer is not None" and not just + # "if answer" because answer implements __len__, and python + # will call that. We want to return if we have an answer + # object, including in cases where its length is 0. + if answer is not None: + # cache hit! + return answer + assert request is not None # needed for type checking + done = False + while not done: + (nameserver, tcp, backoff) = resolution.next_nameserver() + if backoff: + time.sleep(backoff) + timeout = self._compute_timeout(start, lifetime, resolution.errors) + try: + response = nameserver.query( + request, + timeout=timeout, + source=source, + source_port=source_port, + max_size=tcp, + ) + except Exception as ex: + (_, done) = resolution.query_result(None, ex) + continue + (answer, done) = resolution.query_result(response, None) + # Note we need to say "if answer is not None" and not just + # "if answer" because answer implements __len__, and python + # will call that. We want to return if we have an answer + # object, including in cases where its length is 0. 
+ if answer is not None: + return answer + + def query( + self, + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, + ) -> Answer: # pragma: no cover + """Query nameservers to find the answer to the question. + + This method calls resolve() with ``search=True``, and is + provided for backwards compatibility with prior versions of + dnspython. See the documentation for the resolve() method for + further details. + """ + warnings.warn( + "please use dns.resolver.Resolver.resolve() instead", + DeprecationWarning, + stacklevel=2, + ) + return self.resolve( + qname, + rdtype, + rdclass, + tcp, + source, + raise_on_no_answer, + source_port, + lifetime, + True, + ) + + def resolve_address(self, ipaddr: str, *args: Any, **kwargs: Any) -> Answer: + """Use a resolver to run a reverse query for PTR records. + + This utilizes the resolve() method to perform a PTR lookup on the + specified IP address. + + *ipaddr*, a ``str``, the IPv4 or IPv6 address you want to get + the PTR record for. + + All other arguments that can be passed to the resolve() function + except for rdtype and rdclass are also supported by this + function. + """ + # We make a modified kwargs for type checking happiness, as otherwise + # we get a legit warning about possibly having rdtype and rdclass + # in the kwargs more than once. 
+ modified_kwargs: Dict[str, Any] = {} + modified_kwargs.update(kwargs) + modified_kwargs["rdtype"] = dns.rdatatype.PTR + modified_kwargs["rdclass"] = dns.rdataclass.IN + return self.resolve( + dns.reversename.from_address(ipaddr), *args, **modified_kwargs + ) + + def resolve_name( + self, + name: Union[dns.name.Name, str], + family: int = socket.AF_UNSPEC, + **kwargs: Any, + ) -> HostAnswers: + """Use a resolver to query for address records. + + This utilizes the resolve() method to perform A and/or AAAA lookups on + the specified name. + + *qname*, a ``dns.name.Name`` or ``str``, the name to resolve. + + *family*, an ``int``, the address family. If socket.AF_UNSPEC + (the default), both A and AAAA records will be retrieved. + + All other arguments that can be passed to the resolve() function + except for rdtype and rdclass are also supported by this + function. + """ + # We make a modified kwargs for type checking happiness, as otherwise + # we get a legit warning about possibly having rdtype and rdclass + # in the kwargs more than once. + modified_kwargs: Dict[str, Any] = {} + modified_kwargs.update(kwargs) + modified_kwargs.pop("rdtype", None) + modified_kwargs["rdclass"] = dns.rdataclass.IN + + if family == socket.AF_INET: + v4 = self.resolve(name, dns.rdatatype.A, **modified_kwargs) + return HostAnswers.make(v4=v4) + elif family == socket.AF_INET6: + v6 = self.resolve(name, dns.rdatatype.AAAA, **modified_kwargs) + return HostAnswers.make(v6=v6) + elif family != socket.AF_UNSPEC: # pragma: no cover + raise NotImplementedError(f"unknown address family {family}") + + raise_on_no_answer = modified_kwargs.pop("raise_on_no_answer", True) + lifetime = modified_kwargs.pop("lifetime", None) + start = time.time() + v6 = self.resolve( + name, + dns.rdatatype.AAAA, + raise_on_no_answer=False, + lifetime=self._compute_timeout(start, lifetime), + **modified_kwargs, + ) + # Note that setting name ensures we query the same name + # for A as we did for AAAA. 
(This is just in case search lists + # are active by default in the resolver configuration and + # we might be talking to a server that says NXDOMAIN when it + # wants to say NOERROR no data.)
The current implementation does not use any address hints from the SVCB record, + nor does it resolve addresses for the SVCB target name, rather it assumes that + the bootstrap nameserver will always be one of the addresses and uses it. + A future revision to the code may offer fuller support. The code verifies that + the bootstrap nameserver is in the Subject Alternative Name field of the + TLS certificate.
+ """ + + return get_default_resolver().resolve( + qname, + rdtype, + rdclass, + tcp, + source, + raise_on_no_answer, + source_port, + lifetime, + search, + ) + + +def query( + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, +) -> Answer: # pragma: no cover + """Query nameservers to find the answer to the question. + + This method calls resolve() with ``search=True``, and is + provided for backwards compatibility with prior versions of + dnspython. See the documentation for the resolve() method for + further details. + """ + warnings.warn( + "please use dns.resolver.resolve() instead", DeprecationWarning, stacklevel=2 + ) + return resolve( + qname, + rdtype, + rdclass, + tcp, + source, + raise_on_no_answer, + source_port, + lifetime, + True, + ) + + +def resolve_address(ipaddr: str, *args: Any, **kwargs: Any) -> Answer: + """Use a resolver to run a reverse query for PTR records. + + See ``dns.resolver.Resolver.resolve_address`` for more information on the + parameters. + """ + + return get_default_resolver().resolve_address(ipaddr, *args, **kwargs) + + +def resolve_name( + name: Union[dns.name.Name, str], family: int = socket.AF_UNSPEC, **kwargs: Any +) -> HostAnswers: + """Use a resolver to query for address records. + + See ``dns.resolver.Resolver.resolve_name`` for more information on the + parameters. + """ + + return get_default_resolver().resolve_name(name, family, **kwargs) + + +def canonical_name(name: Union[dns.name.Name, str]) -> dns.name.Name: + """Determine the canonical name of *name*. + + See ``dns.resolver.Resolver.canonical_name`` for more information on the + parameters and possible exceptions. 
+ """ + + return get_default_resolver().canonical_name(name) + + +def try_ddr(lifetime: float = 5.0) -> None: # pragma: no cover + """Try to update the default resolver's nameservers using Discovery of Designated + Resolvers (DDR). If successful, the resolver will subsequently use + DNS-over-HTTPS or DNS-over-TLS for future queries. + + See :py:func:`dns.resolver.Resolver.try_ddr` for more information. + """ + return get_default_resolver().try_ddr(lifetime) + + +def zone_for_name( + name: Union[dns.name.Name, str], + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + tcp: bool = False, + resolver: Optional[Resolver] = None, + lifetime: Optional[float] = None, +) -> dns.name.Name: + """Find the name of the zone which contains the specified name. + + *name*, an absolute ``dns.name.Name`` or ``str``, the query name. + + *rdclass*, an ``int``, the query class. + + *tcp*, a ``bool``. If ``True``, use TCP to make the query. + + *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use. + If ``None``, the default, then the default resolver is used. + + *lifetime*, a ``float``, the total time to allow for the queries needed + to determine the zone. If ``None``, the default, then only the individual + query limits of the resolver apply. + + Raises ``dns.resolver.NoRootSOA`` if there is no SOA RR at the DNS + root. (This is only likely to happen if you're using non-default + root servers in your network and they are misconfigured.) + + Raises ``dns.resolver.LifetimeTimeout`` if the answer could not be + found in the allotted lifetime. + + Returns a ``dns.name.Name``. 
+ """ + + if isinstance(name, str): + name = dns.name.from_text(name, dns.name.root) + if resolver is None: + resolver = get_default_resolver() + if not name.is_absolute(): + raise NotAbsolute(name) + start = time.time() + expiration: Optional[float] + if lifetime is not None: + expiration = start + lifetime + else: + expiration = None + while 1: + try: + rlifetime: Optional[float] + if expiration is not None: + rlifetime = expiration - time.time() + if rlifetime <= 0: + rlifetime = 0 + else: + rlifetime = None + answer = resolver.resolve( + name, dns.rdatatype.SOA, rdclass, tcp, lifetime=rlifetime + ) + assert answer.rrset is not None + if answer.rrset.name == name: + return name + # otherwise we were CNAMEd or DNAMEd and need to look higher + except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e: + if isinstance(e, dns.resolver.NXDOMAIN): + response = e.responses().get(name) + else: + response = e.response() # pylint: disable=no-value-for-parameter + if response: + for rrs in response.authority: + if rrs.rdtype == dns.rdatatype.SOA and rrs.rdclass == rdclass: + (nr, _, _) = rrs.name.fullcompare(name) + if nr == dns.name.NAMERELN_SUPERDOMAIN: + # We're doing a proper superdomain check as + # if the name were equal we ought to have gotten + # it in the answer section! We are ignoring the + # possibility that the authority is insane and + # is including multiple SOA RRs for different + # authorities. + return rrs.name + # we couldn't extract anything useful from the response (e.g. it's + # a type 3 NXDOMAIN) + try: + name = name.parent() + except dns.name.NoParent: + raise NoRootSOA + + +def make_resolver_at( + where: Union[dns.name.Name, str], + port: int = 53, + family: int = socket.AF_UNSPEC, + resolver: Optional[Resolver] = None, +) -> Resolver: + """Make a stub resolver using the specified destination as the full resolver. + + *where*, a ``dns.name.Name`` or ``str`` the domain name or IP address of the + full resolver. 
+ + *port*, an ``int``, the port to use. If not specified, the default is 53. + + *family*, an ``int``, the address family to use. This parameter is used if + *where* is not an address. The default is ``socket.AF_UNSPEC`` in which case + the first address returned by ``resolve_name()`` will be used, otherwise the + first address of the specified family will be used. + + *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use for + resolution of hostnames. If not specified, the default resolver will be used. + + Returns a ``dns.resolver.Resolver`` or raises an exception. + """ + if resolver is None: + resolver = get_default_resolver() + nameservers: List[Union[str, dns.nameserver.Nameserver]] = [] + if isinstance(where, str) and dns.inet.is_address(where): + nameservers.append(dns.nameserver.Do53Nameserver(where, port)) + else: + for address in resolver.resolve_name(where, family).addresses(): + nameservers.append(dns.nameserver.Do53Nameserver(address, port)) + res = dns.resolver.Resolver(configure=False) + res.nameservers = nameservers + return res + + +def resolve_at( + where: Union[dns.name.Name, str], + qname: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.A, + rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN, + tcp: bool = False, + source: Optional[str] = None, + raise_on_no_answer: bool = True, + source_port: int = 0, + lifetime: Optional[float] = None, + search: Optional[bool] = None, + port: int = 53, + family: int = socket.AF_UNSPEC, + resolver: Optional[Resolver] = None, +) -> Answer: + """Query nameservers to find the answer to the question. + + This is a convenience function that calls ``dns.resolver.make_resolver_at()`` to + make a resolver, and then uses it to resolve the query. 
+ + See ``dns.resolver.Resolver.resolve`` for more information on the resolution + parameters, and ``dns.resolver.make_resolver_at`` for information about the resolver + parameters *where*, *port*, *family*, and *resolver*. + + If making more than one query, it is more efficient to call + ``dns.resolver.make_resolver_at()`` and then use that resolver for the queries + instead of calling ``resolve_at()`` multiple times. + """ + return make_resolver_at(where, port, family, resolver).resolve( + qname, + rdtype, + rdclass, + tcp, + source, + raise_on_no_answer, + source_port, + lifetime, + search, + ) + + +# +# Support for overriding the system resolver for all python code in the +# running process. +# + +_protocols_for_socktype = { + socket.SOCK_DGRAM: [socket.SOL_UDP], + socket.SOCK_STREAM: [socket.SOL_TCP], +} + +_resolver = None +_original_getaddrinfo = socket.getaddrinfo +_original_getnameinfo = socket.getnameinfo +_original_getfqdn = socket.getfqdn +_original_gethostbyname = socket.gethostbyname +_original_gethostbyname_ex = socket.gethostbyname_ex +_original_gethostbyaddr = socket.gethostbyaddr + + +def _getaddrinfo( + host=None, service=None, family=socket.AF_UNSPEC, socktype=0, proto=0, flags=0 +): + if flags & socket.AI_NUMERICHOST != 0: + # Short circuit directly into the system's getaddrinfo(). We're + # not adding any value in this case, and this avoids infinite loops + # because dns.query.* needs to call getaddrinfo() for IPv6 scoping + # reasons. We will also do this short circuit below if we + # discover that the host is an address literal. + return _original_getaddrinfo(host, service, family, socktype, proto, flags) + if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0: + # Not implemented. We raise a gaierror as opposed to a + # NotImplementedError as it helps callers handle errors more + # appropriately. [Issue #316] + # + # We raise EAI_FAIL as opposed to EAI_SYSTEM because there is + # no EAI_SYSTEM on Windows [Issue #416]. 
We didn't go for + # EAI_BADFLAGS as the flags aren't bad, we just don't + # implement them. + raise socket.gaierror( + socket.EAI_FAIL, "Non-recoverable failure in name resolution" + ) + if host is None and service is None: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + addrs = [] + canonical_name = None # pylint: disable=redefined-outer-name + # Is host None or an address literal? If so, use the system's + # getaddrinfo(). + if host is None: + return _original_getaddrinfo(host, service, family, socktype, proto, flags) + try: + # We don't care about the result of af_for_address(), we're just + # calling it so it raises an exception if host is not an IPv4 or + # IPv6 address. + dns.inet.af_for_address(host) + return _original_getaddrinfo(host, service, family, socktype, proto, flags) + except Exception: + pass + # Something needs resolution! + try: + answers = _resolver.resolve_name(host, family) + addrs = answers.addresses_and_families() + canonical_name = answers.canonical_name().to_text(True) + except dns.resolver.NXDOMAIN: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + except Exception: + # We raise EAI_AGAIN here as the failure may be temporary + # (e.g. a timeout) and EAI_SYSTEM isn't defined on Windows. + # [Issue #416] + raise socket.gaierror(socket.EAI_AGAIN, "Temporary failure in name resolution") + port = None + try: + # Is it a port literal? 
+ if service is None: + port = 0 + else: + port = int(service) + except Exception: + if flags & socket.AI_NUMERICSERV == 0: + try: + port = socket.getservbyname(service) + except Exception: + pass + if port is None: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + tuples = [] + if socktype == 0: + socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM] + else: + socktypes = [socktype] + if flags & socket.AI_CANONNAME != 0: + cname = canonical_name + else: + cname = "" + for addr, af in addrs: + for socktype in socktypes: + for proto in _protocols_for_socktype[socktype]: + addr_tuple = dns.inet.low_level_address_tuple((addr, port), af) + tuples.append((af, socktype, proto, cname, addr_tuple)) + if len(tuples) == 0: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + return tuples + + +def _getnameinfo(sockaddr, flags=0): + host = sockaddr[0] + port = sockaddr[1] + if len(sockaddr) == 4: + scope = sockaddr[3] + family = socket.AF_INET6 + else: + scope = None + family = socket.AF_INET + tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM, socket.SOL_TCP, 0) + if len(tuples) > 1: + raise OSError("sockaddr resolved to multiple addresses") + addr = tuples[0][4][0] + if flags & socket.NI_DGRAM: + pname = "udp" + else: + pname = "tcp" + qname = dns.reversename.from_address(addr) + if flags & socket.NI_NUMERICHOST == 0: + try: + answer = _resolver.resolve(qname, "PTR") + hostname = answer.rrset[0].target.to_text(True) + except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): + if flags & socket.NI_NAMEREQD: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + hostname = addr + if scope is not None: + hostname += "%" + str(scope) + else: + hostname = addr + if scope is not None: + hostname += "%" + str(scope) + if flags & socket.NI_NUMERICSERV: + service = str(port) + else: + service = socket.getservbyport(port, pname) + return (hostname, service) + + +def _getfqdn(name=None): + if name is None: + name = 
socket.gethostname() + try: + (name, _, _) = _gethostbyaddr(name) + # Python's version checks aliases too, but our gethostbyname + # ignores them, so we do so here as well. + except Exception: # pragma: no cover + pass + return name + + +def _gethostbyname(name): + return _gethostbyname_ex(name)[2][0] + + +def _gethostbyname_ex(name): + aliases = [] + addresses = [] + tuples = _getaddrinfo( + name, 0, socket.AF_INET, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME + ) + canonical = tuples[0][3] + for item in tuples: + addresses.append(item[4][0]) + # XXX we just ignore aliases + return (canonical, aliases, addresses) + + +def _gethostbyaddr(ip): + try: + dns.ipv6.inet_aton(ip) + sockaddr = (ip, 80, 0, 0) + family = socket.AF_INET6 + except Exception: + try: + dns.ipv4.inet_aton(ip) + except Exception: + raise socket.gaierror(socket.EAI_NONAME, "Name or service not known") + sockaddr = (ip, 80) + family = socket.AF_INET + (name, _) = _getnameinfo(sockaddr, socket.NI_NAMEREQD) + aliases = [] + addresses = [] + tuples = _getaddrinfo( + name, 0, family, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME + ) + canonical = tuples[0][3] + # We only want to include an address from the tuples if it's the + # same as the one we asked about. We do this comparison in binary + # to avoid any differences in text representations. + bin_ip = dns.inet.inet_pton(family, ip) + for item in tuples: + addr = item[4][0] + bin_addr = dns.inet.inet_pton(family, addr) + if bin_ip == bin_addr: + addresses.append(addr) + # XXX we just ignore aliases + return (canonical, aliases, addresses) + + +def override_system_resolver(resolver: Optional[Resolver] = None) -> None: + """Override the system resolver routines in the socket module with + versions which use dnspython's resolver. + + This can be useful in testing situations where you want to control + the resolution behavior of python code without having to change + the system's resolver settings (e.g. /etc/resolv.conf). 
+ + The resolver to use may be specified; if it's not, the default + resolver will be used. + + resolver, a ``dns.resolver.Resolver`` or ``None``, the resolver to use. + """ + + if resolver is None: + resolver = get_default_resolver() + global _resolver + _resolver = resolver + socket.getaddrinfo = _getaddrinfo + socket.getnameinfo = _getnameinfo + socket.getfqdn = _getfqdn + socket.gethostbyname = _gethostbyname + socket.gethostbyname_ex = _gethostbyname_ex + socket.gethostbyaddr = _gethostbyaddr + + +def restore_system_resolver() -> None: + """Undo the effects of prior override_system_resolver().""" + + global _resolver + _resolver = None + socket.getaddrinfo = _original_getaddrinfo + socket.getnameinfo = _original_getnameinfo + socket.getfqdn = _original_getfqdn + socket.gethostbyname = _original_gethostbyname + socket.gethostbyname_ex = _original_gethostbyname_ex + socket.gethostbyaddr = _original_gethostbyaddr diff --git a/vllm/lib/python3.10/site-packages/dns/serial.py b/vllm/lib/python3.10/site-packages/dns/serial.py new file mode 100644 index 0000000000000000000000000000000000000000..3417299be2bbb3726780f1ebf74bb16974cae308 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/serial.py @@ -0,0 +1,118 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +"""Serial Number Arthimetic from RFC 1982""" + + +class Serial: + def __init__(self, value: int, bits: int = 32): + self.value = value % 2**bits + self.bits = bits + + def __repr__(self): + return f"dns.serial.Serial({self.value}, {self.bits})" + + def __eq__(self, other): + if isinstance(other, int): + other = Serial(other, self.bits) + elif not isinstance(other, Serial) or other.bits != self.bits: + return NotImplemented + return self.value == other.value + + def __ne__(self, other): + if isinstance(other, int): + other = Serial(other, self.bits) + elif not isinstance(other, Serial) or other.bits != self.bits: + return NotImplemented + return self.value != other.value + + 
def __lt__(self, other): + if isinstance(other, int): + other = Serial(other, self.bits) + elif not isinstance(other, Serial) or other.bits != self.bits: + return NotImplemented + if self.value < other.value and other.value - self.value < 2 ** (self.bits - 1): + return True + elif self.value > other.value and self.value - other.value > 2 ** ( + self.bits - 1 + ): + return True + else: + return False + + def __le__(self, other): + return self == other or self < other + + def __gt__(self, other): + if isinstance(other, int): + other = Serial(other, self.bits) + elif not isinstance(other, Serial) or other.bits != self.bits: + return NotImplemented + if self.value < other.value and other.value - self.value > 2 ** (self.bits - 1): + return True + elif self.value > other.value and self.value - other.value < 2 ** ( + self.bits - 1 + ): + return True + else: + return False + + def __ge__(self, other): + return self == other or self > other + + def __add__(self, other): + v = self.value + if isinstance(other, Serial): + delta = other.value + elif isinstance(other, int): + delta = other + else: + raise ValueError + if abs(delta) > (2 ** (self.bits - 1) - 1): + raise ValueError + v += delta + v = v % 2**self.bits + return Serial(v, self.bits) + + def __iadd__(self, other): + v = self.value + if isinstance(other, Serial): + delta = other.value + elif isinstance(other, int): + delta = other + else: + raise ValueError + if abs(delta) > (2 ** (self.bits - 1) - 1): + raise ValueError + v += delta + v = v % 2**self.bits + self.value = v + return self + + def __sub__(self, other): + v = self.value + if isinstance(other, Serial): + delta = other.value + elif isinstance(other, int): + delta = other + else: + raise ValueError + if abs(delta) > (2 ** (self.bits - 1) - 1): + raise ValueError + v -= delta + v = v % 2**self.bits + return Serial(v, self.bits) + + def __isub__(self, other): + v = self.value + if isinstance(other, Serial): + delta = other.value + elif isinstance(other, int): 
+ delta = other + else: + raise ValueError + if abs(delta) > (2 ** (self.bits - 1) - 1): + raise ValueError + v -= delta + v = v % 2**self.bits + self.value = v + return self diff --git a/vllm/lib/python3.10/site-packages/dns/transaction.py b/vllm/lib/python3.10/site-packages/dns/transaction.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2e1160336b6450525f1748bdc05c05e08acca4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/transaction.py @@ -0,0 +1,649 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +import collections +from typing import Any, Callable, Iterator, List, Optional, Tuple, Union + +import dns.exception +import dns.name +import dns.node +import dns.rdataclass +import dns.rdataset +import dns.rdatatype +import dns.rrset +import dns.serial +import dns.ttl + + +class TransactionManager: + def reader(self) -> "Transaction": + """Begin a read-only transaction.""" + raise NotImplementedError # pragma: no cover + + def writer(self, replacement: bool = False) -> "Transaction": + """Begin a writable transaction. + + *replacement*, a ``bool``. If `True`, the content of the + transaction completely replaces any prior content. If False, + the default, then the content of the transaction updates the + existing content. + """ + raise NotImplementedError # pragma: no cover + + def origin_information( + self, + ) -> Tuple[Optional[dns.name.Name], bool, Optional[dns.name.Name]]: + """Returns a tuple + + (absolute_origin, relativize, effective_origin) + + giving the absolute name of the default origin for any + relative domain names, the "effective origin", and whether + names should be relativized. The "effective origin" is the + absolute origin if relativize is False, and the empty name if + relativize is true. (The effective origin is provided even + though it can be computed from the absolute_origin and + relativize setting because it avoids a lot of code + duplication.) 
+ + If the returned names are `None`, then no origin information is + available. + + This information is used by code working with transactions to + allow it to coordinate relativization. The transaction code + itself takes what it gets (i.e. does not change name + relativity). + + """ + raise NotImplementedError # pragma: no cover + + def get_class(self) -> dns.rdataclass.RdataClass: + """The class of the transaction manager.""" + raise NotImplementedError # pragma: no cover + + def from_wire_origin(self) -> Optional[dns.name.Name]: + """Origin to use in from_wire() calls.""" + (absolute_origin, relativize, _) = self.origin_information() + if relativize: + return absolute_origin + else: + return None + + +class DeleteNotExact(dns.exception.DNSException): + """Existing data did not match data specified by an exact delete.""" + + +class ReadOnly(dns.exception.DNSException): + """Tried to write to a read-only transaction.""" + + +class AlreadyEnded(dns.exception.DNSException): + """Tried to use an already-ended transaction.""" + + +def _ensure_immutable_rdataset(rdataset): + if rdataset is None or isinstance(rdataset, dns.rdataset.ImmutableRdataset): + return rdataset + return dns.rdataset.ImmutableRdataset(rdataset) + + +def _ensure_immutable_node(node): + if node is None or node.is_immutable(): + return node + return dns.node.ImmutableNode(node) + + +CheckPutRdatasetType = Callable[ + ["Transaction", dns.name.Name, dns.rdataset.Rdataset], None +] +CheckDeleteRdatasetType = Callable[ + ["Transaction", dns.name.Name, dns.rdatatype.RdataType, dns.rdatatype.RdataType], + None, +] +CheckDeleteNameType = Callable[["Transaction", dns.name.Name], None] + + +class Transaction: + def __init__( + self, + manager: TransactionManager, + replacement: bool = False, + read_only: bool = False, + ): + self.manager = manager + self.replacement = replacement + self.read_only = read_only + self._ended = False + self._check_put_rdataset: List[CheckPutRdatasetType] = [] + 
self._check_delete_rdataset: List[CheckDeleteRdatasetType] = [] + self._check_delete_name: List[CheckDeleteNameType] = [] + + # + # This is the high level API + # + # Note that we currently use non-immutable types in the return type signature to + # avoid covariance problems, e.g. if the caller has a List[Rdataset], mypy will be + # unhappy if we return an ImmutableRdataset. + + def get( + self, + name: Optional[Union[dns.name.Name, str]], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> dns.rdataset.Rdataset: + """Return the rdataset associated with *name*, *rdtype*, and *covers*, + or `None` if not found. + + Note that the returned rdataset is immutable. + """ + self._check_ended() + if isinstance(name, str): + name = dns.name.from_text(name, None) + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + rdataset = self._get_rdataset(name, rdtype, covers) + return _ensure_immutable_rdataset(rdataset) + + def get_node(self, name: dns.name.Name) -> Optional[dns.node.Node]: + """Return the node at *name*, if any. + + Returns an immutable node or ``None``. + """ + return _ensure_immutable_node(self._get_node(name)) + + def _check_read_only(self) -> None: + if self.read_only: + raise ReadOnly + + def add(self, *args: Any) -> None: + """Add records. + + The arguments may be: + + - rrset + + - name, rdataset... + + - name, ttl, rdata... + """ + self._check_ended() + self._check_read_only() + self._add(False, args) + + def replace(self, *args: Any) -> None: + """Replace the existing rdataset at the name with the specified + rdataset, or add the specified rdataset if there was no existing + rdataset. + + The arguments may be: + + - rrset + + - name, rdataset... + + - name, ttl, rdata... + + Note that if you want to replace the entire node, you should do + a delete of the name followed by one or more calls to add() or + replace(). 
+ """ + self._check_ended() + self._check_read_only() + self._add(True, args) + + def delete(self, *args: Any) -> None: + """Delete records. + + It is not an error if some of the records are not in the existing + set. + + The arguments may be: + + - rrset + + - name + + - name, rdatatype, [covers] + + - name, rdataset... + + - name, rdata... + """ + self._check_ended() + self._check_read_only() + self._delete(False, args) + + def delete_exact(self, *args: Any) -> None: + """Delete records. + + The arguments may be: + + - rrset + + - name + + - name, rdatatype, [covers] + + - name, rdataset... + + - name, rdata... + + Raises dns.transaction.DeleteNotExact if some of the records + are not in the existing set. + + """ + self._check_ended() + self._check_read_only() + self._delete(True, args) + + def name_exists(self, name: Union[dns.name.Name, str]) -> bool: + """Does the specified name exist?""" + self._check_ended() + if isinstance(name, str): + name = dns.name.from_text(name, None) + return self._name_exists(name) + + def update_serial( + self, + value: int = 1, + relative: bool = True, + name: dns.name.Name = dns.name.empty, + ) -> None: + """Update the serial number. + + *value*, an `int`, is an increment if *relative* is `True`, or the + actual value to set if *relative* is `False`. + + Raises `KeyError` if there is no SOA rdataset at *name*. + + Raises `ValueError` if *value* is negative or if the increment is + so large that it would cause the new serial to be less than the + prior value. 
+ """ + self._check_ended() + if value < 0: + raise ValueError("negative update_serial() value") + if isinstance(name, str): + name = dns.name.from_text(name, None) + rdataset = self._get_rdataset(name, dns.rdatatype.SOA, dns.rdatatype.NONE) + if rdataset is None or len(rdataset) == 0: + raise KeyError + if relative: + serial = dns.serial.Serial(rdataset[0].serial) + value + else: + serial = dns.serial.Serial(value) + serial = serial.value # convert back to int + if serial == 0: + serial = 1 + rdata = rdataset[0].replace(serial=serial) + new_rdataset = dns.rdataset.from_rdata(rdataset.ttl, rdata) + self.replace(name, new_rdataset) + + def __iter__(self): + self._check_ended() + return self._iterate_rdatasets() + + def changed(self) -> bool: + """Has this transaction changed anything? + + For read-only transactions, the result is always `False`. + + For writable transactions, the result is `True` if at some time + during the life of the transaction, the content was changed. + """ + self._check_ended() + return self._changed() + + def commit(self) -> None: + """Commit the transaction. + + Normally transactions are used as context managers and commit + or rollback automatically, but it may be done explicitly if needed. + A ``dns.transaction.Ended`` exception will be raised if you try + to use a transaction after it has been committed or rolled back. + + Raises an exception if the commit fails (in which case the transaction + is also rolled back. + """ + self._end(True) + + def rollback(self) -> None: + """Rollback the transaction. + + Normally transactions are used as context managers and commit + or rollback automatically, but it may be done explicitly if needed. + A ``dns.transaction.AlreadyEnded`` exception will be raised if you try + to use a transaction after it has been committed or rolled back. + + Rollback cannot otherwise fail. 
+ """ + self._end(False) + + def check_put_rdataset(self, check: CheckPutRdatasetType) -> None: + """Call *check* before putting (storing) an rdataset. + + The function is called with the transaction, the name, and the rdataset. + + The check function may safely make non-mutating transaction method + calls, but behavior is undefined if mutating transaction methods are + called. The check function should raise an exception if it objects to + the put, and otherwise should return ``None``. + """ + self._check_put_rdataset.append(check) + + def check_delete_rdataset(self, check: CheckDeleteRdatasetType) -> None: + """Call *check* before deleting an rdataset. + + The function is called with the transaction, the name, the rdatatype, + and the covered rdatatype. + + The check function may safely make non-mutating transaction method + calls, but behavior is undefined if mutating transaction methods are + called. The check function should raise an exception if it objects to + the put, and otherwise should return ``None``. + """ + self._check_delete_rdataset.append(check) + + def check_delete_name(self, check: CheckDeleteNameType) -> None: + """Call *check* before putting (storing) an rdataset. + + The function is called with the transaction and the name. + + The check function may safely make non-mutating transaction method + calls, but behavior is undefined if mutating transaction methods are + called. The check function should raise an exception if it objects to + the put, and otherwise should return ``None``. + """ + self._check_delete_name.append(check) + + def iterate_rdatasets( + self, + ) -> Iterator[Tuple[dns.name.Name, dns.rdataset.Rdataset]]: + """Iterate all the rdatasets in the transaction, returning + (`dns.name.Name`, `dns.rdataset.Rdataset`) tuples. 
+ + Note that as is usual with python iterators, adding or removing items + while iterating will invalidate the iterator and may raise `RuntimeError` + or fail to iterate over all entries.""" + self._check_ended() + return self._iterate_rdatasets() + + def iterate_names(self) -> Iterator[dns.name.Name]: + """Iterate all the names in the transaction. + + Note that as is usual with python iterators, adding or removing names + while iterating will invalidate the iterator and may raise `RuntimeError` + or fail to iterate over all entries.""" + self._check_ended() + return self._iterate_names() + + # + # Helper methods + # + + def _raise_if_not_empty(self, method, args): + if len(args) != 0: + raise TypeError(f"extra parameters to {method}") + + def _rdataset_from_args(self, method, deleting, args): + try: + arg = args.popleft() + if isinstance(arg, dns.rrset.RRset): + rdataset = arg.to_rdataset() + elif isinstance(arg, dns.rdataset.Rdataset): + rdataset = arg + else: + if deleting: + ttl = 0 + else: + if isinstance(arg, int): + ttl = arg + if ttl > dns.ttl.MAX_TTL: + raise ValueError(f"{method}: TTL value too big") + else: + raise TypeError(f"{method}: expected a TTL") + arg = args.popleft() + if isinstance(arg, dns.rdata.Rdata): + rdataset = dns.rdataset.from_rdata(ttl, arg) + else: + raise TypeError(f"{method}: expected an Rdata") + return rdataset + except IndexError: + if deleting: + return None + else: + # reraise + raise TypeError(f"{method}: expected more arguments") + + def _add(self, replace, args): + try: + args = collections.deque(args) + if replace: + method = "replace()" + else: + method = "add()" + arg = args.popleft() + if isinstance(arg, str): + arg = dns.name.from_text(arg, None) + if isinstance(arg, dns.name.Name): + name = arg + rdataset = self._rdataset_from_args(method, False, args) + elif isinstance(arg, dns.rrset.RRset): + rrset = arg + name = rrset.name + # rrsets are also rdatasets, but they don't print the + # same and can't be stored in 
nodes, so convert. + rdataset = rrset.to_rdataset() + else: + raise TypeError( + f"{method} requires a name or RRset as the first argument" + ) + if rdataset.rdclass != self.manager.get_class(): + raise ValueError(f"{method} has objects of wrong RdataClass") + if rdataset.rdtype == dns.rdatatype.SOA: + (_, _, origin) = self._origin_information() + if name != origin: + raise ValueError(f"{method} has non-origin SOA") + self._raise_if_not_empty(method, args) + if not replace: + existing = self._get_rdataset(name, rdataset.rdtype, rdataset.covers) + if existing is not None: + if isinstance(existing, dns.rdataset.ImmutableRdataset): + trds = dns.rdataset.Rdataset( + existing.rdclass, existing.rdtype, existing.covers + ) + trds.update(existing) + existing = trds + rdataset = existing.union(rdataset) + self._checked_put_rdataset(name, rdataset) + except IndexError: + raise TypeError(f"not enough parameters to {method}") + + def _delete(self, exact, args): + try: + args = collections.deque(args) + if exact: + method = "delete_exact()" + else: + method = "delete()" + arg = args.popleft() + if isinstance(arg, str): + arg = dns.name.from_text(arg, None) + if isinstance(arg, dns.name.Name): + name = arg + if len(args) > 0 and ( + isinstance(args[0], int) or isinstance(args[0], str) + ): + # deleting by type and (optionally) covers + rdtype = dns.rdatatype.RdataType.make(args.popleft()) + if len(args) > 0: + covers = dns.rdatatype.RdataType.make(args.popleft()) + else: + covers = dns.rdatatype.NONE + self._raise_if_not_empty(method, args) + existing = self._get_rdataset(name, rdtype, covers) + if existing is None: + if exact: + raise DeleteNotExact(f"{method}: missing rdataset") + else: + self._checked_delete_rdataset(name, rdtype, covers) + return + else: + rdataset = self._rdataset_from_args(method, True, args) + elif isinstance(arg, dns.rrset.RRset): + rdataset = arg # rrsets are also rdatasets + name = rdataset.name + else: + raise TypeError( + f"{method} requires a name 
or RRset as the first argument" + ) + self._raise_if_not_empty(method, args) + if rdataset: + if rdataset.rdclass != self.manager.get_class(): + raise ValueError(f"{method} has objects of wrong RdataClass") + existing = self._get_rdataset(name, rdataset.rdtype, rdataset.covers) + if existing is not None: + if exact: + intersection = existing.intersection(rdataset) + if intersection != rdataset: + raise DeleteNotExact(f"{method}: missing rdatas") + rdataset = existing.difference(rdataset) + if len(rdataset) == 0: + self._checked_delete_rdataset( + name, rdataset.rdtype, rdataset.covers + ) + else: + self._checked_put_rdataset(name, rdataset) + elif exact: + raise DeleteNotExact(f"{method}: missing rdataset") + else: + if exact and not self._name_exists(name): + raise DeleteNotExact(f"{method}: name not known") + self._checked_delete_name(name) + except IndexError: + raise TypeError(f"not enough parameters to {method}") + + def _check_ended(self): + if self._ended: + raise AlreadyEnded + + def _end(self, commit): + self._check_ended() + try: + self._end_transaction(commit) + finally: + self._ended = True + + def _checked_put_rdataset(self, name, rdataset): + for check in self._check_put_rdataset: + check(self, name, rdataset) + self._put_rdataset(name, rdataset) + + def _checked_delete_rdataset(self, name, rdtype, covers): + for check in self._check_delete_rdataset: + check(self, name, rdtype, covers) + self._delete_rdataset(name, rdtype, covers) + + def _checked_delete_name(self, name): + for check in self._check_delete_name: + check(self, name) + self._delete_name(name) + + # + # Transactions are context managers. + # + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self._ended: + if exc_type is None: + self.commit() + else: + self.rollback() + return False + + # + # This is the low level API, which must be implemented by subclasses + # of Transaction. 
+ # + + def _get_rdataset(self, name, rdtype, covers): + """Return the rdataset associated with *name*, *rdtype*, and *covers*, + or `None` if not found. + """ + raise NotImplementedError # pragma: no cover + + def _put_rdataset(self, name, rdataset): + """Store the rdataset.""" + raise NotImplementedError # pragma: no cover + + def _delete_name(self, name): + """Delete all data associated with *name*. + + It is not an error if the name does not exist. + """ + raise NotImplementedError # pragma: no cover + + def _delete_rdataset(self, name, rdtype, covers): + """Delete all data associated with *name*, *rdtype*, and *covers*. + + It is not an error if the rdataset does not exist. + """ + raise NotImplementedError # pragma: no cover + + def _name_exists(self, name): + """Does name exist? + + Returns a bool. + """ + raise NotImplementedError # pragma: no cover + + def _changed(self): + """Has this transaction changed anything?""" + raise NotImplementedError # pragma: no cover + + def _end_transaction(self, commit): + """End the transaction. + + *commit*, a bool. If ``True``, commit the transaction, otherwise + roll it back. + + If committing and the commit fails, then roll back and raise an + exception. + """ + raise NotImplementedError # pragma: no cover + + def _set_origin(self, origin): + """Set the origin. + + This method is called when reading a possibly relativized + source, and an origin setting operation occurs (e.g. $ORIGIN + in a zone file). + """ + raise NotImplementedError # pragma: no cover + + def _iterate_rdatasets(self): + """Return an iterator that yields (name, rdataset) tuples.""" + raise NotImplementedError # pragma: no cover + + def _iterate_names(self): + """Return an iterator that yields a name.""" + raise NotImplementedError # pragma: no cover + + def _get_node(self, name): + """Return the node at *name*, if any. + + Returns a node or ``None``. 
+ """ + raise NotImplementedError # pragma: no cover + + # + # Low-level API with a default implementation, in case a subclass needs + # to override. + # + + def _origin_information(self): + # This is only used by _add() + return self.manager.origin_information() diff --git a/vllm/lib/python3.10/site-packages/dns/tsigkeyring.py b/vllm/lib/python3.10/site-packages/dns/tsigkeyring.py new file mode 100644 index 0000000000000000000000000000000000000000..1010a79f8f3c1856b765fa11e01cb5b6e2f6ea64 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/tsigkeyring.py @@ -0,0 +1,68 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""A place to store TSIG keys.""" + +import base64 +from typing import Any, Dict + +import dns.name +import dns.tsig + + +def from_text(textring: Dict[str, Any]) -> Dict[dns.name.Name, dns.tsig.Key]: + """Convert a dictionary containing (textual DNS name, base64 secret) + pairs into a binary keyring which has (dns.name.Name, bytes) pairs, or + a dictionary containing (textual DNS name, (algorithm, base64 secret)) + pairs into a binary keyring which has (dns.name.Name, dns.tsig.Key) pairs. 
+ @rtype: dict""" + + keyring = {} + for name, value in textring.items(): + kname = dns.name.from_text(name) + if isinstance(value, str): + keyring[kname] = dns.tsig.Key(kname, value).secret + else: + (algorithm, secret) = value + keyring[kname] = dns.tsig.Key(kname, secret, algorithm) + return keyring + + +def to_text(keyring: Dict[dns.name.Name, Any]) -> Dict[str, Any]: + """Convert a dictionary containing (dns.name.Name, dns.tsig.Key) pairs + into a text keyring which has (textual DNS name, (textual algorithm, + base64 secret)) pairs, or a dictionary containing (dns.name.Name, bytes) + pairs into a text keyring which has (textual DNS name, base64 secret) pairs. + @rtype: dict""" + + textring = {} + + def b64encode(secret): + return base64.encodebytes(secret).decode().rstrip() + + for name, key in keyring.items(): + tname = name.to_text() + if isinstance(key, bytes): + textring[tname] = b64encode(key) + else: + if isinstance(key.secret, bytes): + text_secret = b64encode(key.secret) + else: + text_secret = str(key.secret) + + textring[tname] = (key.algorithm.to_text(), text_secret) + return textring diff --git a/vllm/lib/python3.10/site-packages/dns/update.py b/vllm/lib/python3.10/site-packages/dns/update.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1157acdfe7f4262afec600fd9a30691aa0f78d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/update.py @@ -0,0 +1,386 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +"""DNS Dynamic Update Support""" + +from typing import Any, List, Optional, Union + +import dns.message +import dns.name +import dns.opcode +import dns.rdata +import dns.rdataclass +import dns.rdataset +import dns.rdatatype +import dns.tsig + + +class UpdateSection(dns.enum.IntEnum): + """Update sections""" + + ZONE = 0 + PREREQ = 1 + UPDATE = 2 + ADDITIONAL = 3 + + @classmethod + def _maximum(cls): + return 3 + + +class UpdateMessage(dns.message.Message): # lgtm[py/missing-equals] + # ignore the mypy error here as we mean to use a different enum + _section_enum = UpdateSection # type: ignore + + def __init__( + self, + zone: Optional[Union[dns.name.Name, str]] = None, + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + keyring: Optional[Any] = None, + keyname: Optional[dns.name.Name] = None, + keyalgorithm: Union[dns.name.Name, str] = dns.tsig.default_algorithm, + id: Optional[int] = None, + ): + """Initialize a new DNS Update object. + + See the documentation of the Message class for a complete + description of the keyring dictionary. + + *zone*, a ``dns.name.Name``, ``str``, or ``None``, the zone + which is being updated. ``None`` should only be used by dnspython's + message constructors, as a zone is required for the convenience + methods like ``add()``, ``replace()``, etc. + + *rdclass*, an ``int`` or ``str``, the class of the zone. + + The *keyring*, *keyname*, and *keyalgorithm* parameters are passed to + ``use_tsig()``; see its documentation for details. 
+ """ + super().__init__(id=id) + self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE) + if isinstance(zone, str): + zone = dns.name.from_text(zone) + self.origin = zone + rdclass = dns.rdataclass.RdataClass.make(rdclass) + self.zone_rdclass = rdclass + if self.origin: + self.find_rrset( + self.zone, + self.origin, + rdclass, + dns.rdatatype.SOA, + create=True, + force_unique=True, + ) + if keyring is not None: + self.use_tsig(keyring, keyname, algorithm=keyalgorithm) + + @property + def zone(self) -> List[dns.rrset.RRset]: + """The zone section.""" + return self.sections[0] + + @zone.setter + def zone(self, v): + self.sections[0] = v + + @property + def prerequisite(self) -> List[dns.rrset.RRset]: + """The prerequisite section.""" + return self.sections[1] + + @prerequisite.setter + def prerequisite(self, v): + self.sections[1] = v + + @property + def update(self) -> List[dns.rrset.RRset]: + """The update section.""" + return self.sections[2] + + @update.setter + def update(self, v): + self.sections[2] = v + + def _add_rr(self, name, ttl, rd, deleting=None, section=None): + """Add a single RR to the update section.""" + + if section is None: + section = self.update + covers = rd.covers() + rrset = self.find_rrset( + section, name, self.zone_rdclass, rd.rdtype, covers, deleting, True, True + ) + rrset.add(rd, ttl) + + def _add(self, replace, section, name, *args): + """Add records. + + *replace* is the replacement mode. If ``False``, + RRs are added to an existing RRset; if ``True``, the RRset + is replaced with the specified contents. The second + argument is the section to add to. The third argument + is always a name. The other arguments can be: + + - rdataset... + + - ttl, rdata... + + - ttl, rdtype, string... 
+ """ + + if isinstance(name, str): + name = dns.name.from_text(name, None) + if isinstance(args[0], dns.rdataset.Rdataset): + for rds in args: + if replace: + self.delete(name, rds.rdtype) + for rd in rds: + self._add_rr(name, rds.ttl, rd, section=section) + else: + args = list(args) + ttl = int(args.pop(0)) + if isinstance(args[0], dns.rdata.Rdata): + if replace: + self.delete(name, args[0].rdtype) + for rd in args: + self._add_rr(name, ttl, rd, section=section) + else: + rdtype = dns.rdatatype.RdataType.make(args.pop(0)) + if replace: + self.delete(name, rdtype) + for s in args: + rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s, self.origin) + self._add_rr(name, ttl, rd, section=section) + + def add(self, name: Union[dns.name.Name, str], *args: Any) -> None: + """Add records. + + The first argument is always a name. The other + arguments can be: + + - rdataset... + + - ttl, rdata... + + - ttl, rdtype, string... + """ + + self._add(False, self.update, name, *args) + + def delete(self, name: Union[dns.name.Name, str], *args: Any) -> None: + """Delete records. + + The first argument is always a name. The other + arguments can be: + + - *empty* + + - rdataset... + + - rdata... + + - rdtype, [string...] 
+ """ + + if isinstance(name, str): + name = dns.name.from_text(name, None) + if len(args) == 0: + self.find_rrset( + self.update, + name, + dns.rdataclass.ANY, + dns.rdatatype.ANY, + dns.rdatatype.NONE, + dns.rdataclass.ANY, + True, + True, + ) + elif isinstance(args[0], dns.rdataset.Rdataset): + for rds in args: + for rd in rds: + self._add_rr(name, 0, rd, dns.rdataclass.NONE) + else: + largs = list(args) + if isinstance(largs[0], dns.rdata.Rdata): + for rd in largs: + self._add_rr(name, 0, rd, dns.rdataclass.NONE) + else: + rdtype = dns.rdatatype.RdataType.make(largs.pop(0)) + if len(largs) == 0: + self.find_rrset( + self.update, + name, + self.zone_rdclass, + rdtype, + dns.rdatatype.NONE, + dns.rdataclass.ANY, + True, + True, + ) + else: + for s in largs: + rd = dns.rdata.from_text( + self.zone_rdclass, + rdtype, + s, # type: ignore[arg-type] + self.origin, + ) + self._add_rr(name, 0, rd, dns.rdataclass.NONE) + + def replace(self, name: Union[dns.name.Name, str], *args: Any) -> None: + """Replace records. + + The first argument is always a name. The other + arguments can be: + + - rdataset... + + - ttl, rdata... + + - ttl, rdtype, string... + + Note that if you want to replace the entire node, you should do + a delete of the name followed by one or more calls to add. + """ + + self._add(True, self.update, name, *args) + + def present(self, name: Union[dns.name.Name, str], *args: Any) -> None: + """Require that an owner name (and optionally an rdata type, + or specific rdataset) exists as a prerequisite to the + execution of the update. + + The first argument is always a name. + The other arguments can be: + + - rdataset... + + - rdata... + + - rdtype, string... 
+ """ + + if isinstance(name, str): + name = dns.name.from_text(name, None) + if len(args) == 0: + self.find_rrset( + self.prerequisite, + name, + dns.rdataclass.ANY, + dns.rdatatype.ANY, + dns.rdatatype.NONE, + None, + True, + True, + ) + elif ( + isinstance(args[0], dns.rdataset.Rdataset) + or isinstance(args[0], dns.rdata.Rdata) + or len(args) > 1 + ): + if not isinstance(args[0], dns.rdataset.Rdataset): + # Add a 0 TTL + largs = list(args) + largs.insert(0, 0) # type: ignore[arg-type] + self._add(False, self.prerequisite, name, *largs) + else: + self._add(False, self.prerequisite, name, *args) + else: + rdtype = dns.rdatatype.RdataType.make(args[0]) + self.find_rrset( + self.prerequisite, + name, + dns.rdataclass.ANY, + rdtype, + dns.rdatatype.NONE, + None, + True, + True, + ) + + def absent( + self, + name: Union[dns.name.Name, str], + rdtype: Optional[Union[dns.rdatatype.RdataType, str]] = None, + ) -> None: + """Require that an owner name (and optionally an rdata type) does + not exist as a prerequisite to the execution of the update.""" + + if isinstance(name, str): + name = dns.name.from_text(name, None) + if rdtype is None: + self.find_rrset( + self.prerequisite, + name, + dns.rdataclass.NONE, + dns.rdatatype.ANY, + dns.rdatatype.NONE, + None, + True, + True, + ) + else: + rdtype = dns.rdatatype.RdataType.make(rdtype) + self.find_rrset( + self.prerequisite, + name, + dns.rdataclass.NONE, + rdtype, + dns.rdatatype.NONE, + None, + True, + True, + ) + + def _get_one_rr_per_rrset(self, value): + # Updates are always one_rr_per_rrset + return True + + def _parse_rr_header(self, section, name, rdclass, rdtype): + deleting = None + empty = False + if section == UpdateSection.ZONE: + if ( + dns.rdataclass.is_metaclass(rdclass) + or rdtype != dns.rdatatype.SOA + or self.zone + ): + raise dns.exception.FormError + else: + if not self.zone: + raise dns.exception.FormError + if rdclass in (dns.rdataclass.ANY, dns.rdataclass.NONE): + deleting = rdclass + rdclass = 
self.zone[0].rdclass + empty = ( + deleting == dns.rdataclass.ANY or section == UpdateSection.PREREQ + ) + return (rdclass, rdtype, deleting, empty) + + +# backwards compatibility +Update = UpdateMessage + +### BEGIN generated UpdateSection constants + +ZONE = UpdateSection.ZONE +PREREQ = UpdateSection.PREREQ +UPDATE = UpdateSection.UPDATE +ADDITIONAL = UpdateSection.ADDITIONAL + +### END generated UpdateSection constants diff --git a/vllm/lib/python3.10/site-packages/dns/version.py b/vllm/lib/python3.10/site-packages/dns/version.py new file mode 100644 index 0000000000000000000000000000000000000000..9ed2ce19b2421f30c81ab6243e0120eb846ff9b7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/version.py @@ -0,0 +1,58 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2017 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +"""dnspython release version information.""" + +#: MAJOR +MAJOR = 2 +#: MINOR +MINOR = 7 +#: MICRO +MICRO = 0 +#: RELEASELEVEL +RELEASELEVEL = 0x0F +#: SERIAL +SERIAL = 0 + +if RELEASELEVEL == 0x0F: # pragma: no cover lgtm[py/unreachable-statement] + #: version + version = "%d.%d.%d" % (MAJOR, MINOR, MICRO) # lgtm[py/unreachable-statement] +elif RELEASELEVEL == 0x00: # pragma: no cover lgtm[py/unreachable-statement] + version = "%d.%d.%ddev%d" % ( + MAJOR, + MINOR, + MICRO, + SERIAL, + ) # lgtm[py/unreachable-statement] +elif RELEASELEVEL == 0x0C: # pragma: no cover lgtm[py/unreachable-statement] + version = "%d.%d.%drc%d" % ( + MAJOR, + MINOR, + MICRO, + SERIAL, + ) # lgtm[py/unreachable-statement] +else: # pragma: no cover lgtm[py/unreachable-statement] + version = "%d.%d.%d%x%d" % ( + MAJOR, + MINOR, + MICRO, + RELEASELEVEL, + SERIAL, + ) # lgtm[py/unreachable-statement] + +#: hexversion +hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | SERIAL diff --git a/vllm/lib/python3.10/site-packages/dns/versioned.py b/vllm/lib/python3.10/site-packages/dns/versioned.py new file mode 100644 index 0000000000000000000000000000000000000000..fd78e674e6edbb0dc2dcab6bbc9515b4b2103520 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/versioned.py @@ -0,0 +1,318 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +"""DNS Versioned Zones.""" + +import collections +import threading +from typing import Callable, Deque, Optional, Set, Union + +import dns.exception +import dns.immutable +import dns.name +import dns.node +import dns.rdataclass +import dns.rdataset +import dns.rdatatype +import dns.rdtypes.ANY.SOA +import dns.zone + + +class UseTransaction(dns.exception.DNSException): + """To alter a versioned zone, use a transaction.""" + + +# Backwards compatibility +Node = dns.zone.VersionedNode +ImmutableNode = dns.zone.ImmutableVersionedNode +Version = dns.zone.Version +WritableVersion = dns.zone.WritableVersion 
+ImmutableVersion = dns.zone.ImmutableVersion +Transaction = dns.zone.Transaction + + +class Zone(dns.zone.Zone): # lgtm[py/missing-equals] + __slots__ = [ + "_versions", + "_versions_lock", + "_write_txn", + "_write_waiters", + "_write_event", + "_pruning_policy", + "_readers", + ] + + node_factory = Node + + def __init__( + self, + origin: Optional[Union[dns.name.Name, str]], + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + relativize: bool = True, + pruning_policy: Optional[Callable[["Zone", Version], Optional[bool]]] = None, + ): + """Initialize a versioned zone object. + + *origin* is the origin of the zone. It may be a ``dns.name.Name``, + a ``str``, or ``None``. If ``None``, then the zone's origin will + be set by the first ``$ORIGIN`` line in a zone file. + + *rdclass*, an ``int``, the zone's rdata class; the default is class IN. + + *relativize*, a ``bool``, determine's whether domain names are + relativized to the zone's origin. The default is ``True``. + + *pruning policy*, a function taking a ``Zone`` and a ``Version`` and returning + a ``bool``, or ``None``. Should the version be pruned? If ``None``, + the default policy, which retains one version is used. 
+ """ + super().__init__(origin, rdclass, relativize) + self._versions: Deque[Version] = collections.deque() + self._version_lock = threading.Lock() + if pruning_policy is None: + self._pruning_policy = self._default_pruning_policy + else: + self._pruning_policy = pruning_policy + self._write_txn: Optional[Transaction] = None + self._write_event: Optional[threading.Event] = None + self._write_waiters: Deque[threading.Event] = collections.deque() + self._readers: Set[Transaction] = set() + self._commit_version_unlocked( + None, WritableVersion(self, replacement=True), origin + ) + + def reader( + self, id: Optional[int] = None, serial: Optional[int] = None + ) -> Transaction: # pylint: disable=arguments-differ + if id is not None and serial is not None: + raise ValueError("cannot specify both id and serial") + with self._version_lock: + if id is not None: + version = None + for v in reversed(self._versions): + if v.id == id: + version = v + break + if version is None: + raise KeyError("version not found") + elif serial is not None: + if self.relativize: + oname = dns.name.empty + else: + assert self.origin is not None + oname = self.origin + version = None + for v in reversed(self._versions): + n = v.nodes.get(oname) + if n: + rds = n.get_rdataset(self.rdclass, dns.rdatatype.SOA) + if rds and rds[0].serial == serial: + version = v + break + if version is None: + raise KeyError("serial not found") + else: + version = self._versions[-1] + txn = Transaction(self, False, version) + self._readers.add(txn) + return txn + + def writer(self, replacement: bool = False) -> Transaction: + event = None + while True: + with self._version_lock: + # Checking event == self._write_event ensures that either + # no one was waiting before we got lucky and found no write + # txn, or we were the one who was waiting and got woken up. + # This prevents "taking cuts" when creating a write txn. 
+ if self._write_txn is None and event == self._write_event: + # Creating the transaction defers version setup + # (i.e. copying the nodes dictionary) until we + # give up the lock, so that we hold the lock as + # short a time as possible. This is why we call + # _setup_version() below. + self._write_txn = Transaction( + self, replacement, make_immutable=True + ) + # give up our exclusive right to make a Transaction + self._write_event = None + break + # Someone else is writing already, so we will have to + # wait, but we want to do the actual wait outside the + # lock. + event = threading.Event() + self._write_waiters.append(event) + # wait (note we gave up the lock!) + # + # We only wake one sleeper at a time, so it's important + # that no event waiter can exit this method (e.g. via + # cancellation) without returning a transaction or waking + # someone else up. + # + # This is not a problem with Threading module threads as + # they cannot be canceled, but could be an issue with trio + # tasks when we do the async version of writer(). + # I.e. we'd need to do something like: + # + # try: + # event.wait() + # except trio.Cancelled: + # with self._version_lock: + # self._maybe_wakeup_one_waiter_unlocked() + # raise + # + event.wait() + # Do the deferred version setup. + self._write_txn._setup_version() + return self._write_txn + + def _maybe_wakeup_one_waiter_unlocked(self): + if len(self._write_waiters) > 0: + self._write_event = self._write_waiters.popleft() + self._write_event.set() + + # pylint: disable=unused-argument + def _default_pruning_policy(self, zone, version): + return True + + # pylint: enable=unused-argument + + def _prune_versions_unlocked(self): + assert len(self._versions) > 0 + # Don't ever prune a version greater than or equal to one that + # a reader has open. This pins versions in memory while the + # reader is open, and importantly lets the reader open a txn on + # a successor version (e.g. if generating an IXFR). 
+ # + # Note our definition of least_kept also ensures we do not try to + # delete the greatest version. + if len(self._readers) > 0: + least_kept = min(txn.version.id for txn in self._readers) + else: + least_kept = self._versions[-1].id + while self._versions[0].id < least_kept and self._pruning_policy( + self, self._versions[0] + ): + self._versions.popleft() + + def set_max_versions(self, max_versions: Optional[int]) -> None: + """Set a pruning policy that retains up to the specified number + of versions + """ + if max_versions is not None and max_versions < 1: + raise ValueError("max versions must be at least 1") + if max_versions is None: + + def policy(zone, _): # pylint: disable=unused-argument + return False + + else: + + def policy(zone, _): + return len(zone._versions) > max_versions + + self.set_pruning_policy(policy) + + def set_pruning_policy( + self, policy: Optional[Callable[["Zone", Version], Optional[bool]]] + ) -> None: + """Set the pruning policy for the zone. + + The *policy* function takes a `Version` and returns `True` if + the version should be pruned, and `False` otherwise. `None` + may also be specified for policy, in which case the default policy + is used. + + Pruning checking proceeds from the least version and the first + time the function returns `False`, the checking stops. I.e. the + retained versions are always a consecutive sequence. 
+ """ + if policy is None: + policy = self._default_pruning_policy + with self._version_lock: + self._pruning_policy = policy + self._prune_versions_unlocked() + + def _end_read(self, txn): + with self._version_lock: + self._readers.remove(txn) + self._prune_versions_unlocked() + + def _end_write_unlocked(self, txn): + assert self._write_txn == txn + self._write_txn = None + self._maybe_wakeup_one_waiter_unlocked() + + def _end_write(self, txn): + with self._version_lock: + self._end_write_unlocked(txn) + + def _commit_version_unlocked(self, txn, version, origin): + self._versions.append(version) + self._prune_versions_unlocked() + self.nodes = version.nodes + if self.origin is None: + self.origin = origin + # txn can be None in __init__ when we make the empty version. + if txn is not None: + self._end_write_unlocked(txn) + + def _commit_version(self, txn, version, origin): + with self._version_lock: + self._commit_version_unlocked(txn, version, origin) + + def _get_next_version_id(self): + if len(self._versions) > 0: + id = self._versions[-1].id + 1 + else: + id = 1 + return id + + def find_node( + self, name: Union[dns.name.Name, str], create: bool = False + ) -> dns.node.Node: + if create: + raise UseTransaction + return super().find_node(name) + + def delete_node(self, name: Union[dns.name.Name, str]) -> None: + raise UseTransaction + + def find_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + create: bool = False, + ) -> dns.rdataset.Rdataset: + if create: + raise UseTransaction + rdataset = super().find_rdataset(name, rdtype, covers) + return dns.rdataset.ImmutableRdataset(rdataset) + + def get_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + create: bool = False, + ) -> Optional[dns.rdataset.Rdataset]: + if create: + 
raise UseTransaction + rdataset = super().get_rdataset(name, rdtype, covers) + if rdataset is not None: + return dns.rdataset.ImmutableRdataset(rdataset) + else: + return None + + def delete_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> None: + raise UseTransaction + + def replace_rdataset( + self, name: Union[dns.name.Name, str], replacement: dns.rdataset.Rdataset + ) -> None: + raise UseTransaction diff --git a/vllm/lib/python3.10/site-packages/dns/xfr.py b/vllm/lib/python3.10/site-packages/dns/xfr.py new file mode 100644 index 0000000000000000000000000000000000000000..520aa32ddc32ea50b090ff4b08b9450709a5fb93 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/xfr.py @@ -0,0 +1,343 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2017 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from typing import Any, List, Optional, Tuple, Union + +import dns.exception +import dns.message +import dns.name +import dns.rcode +import dns.rdataset +import dns.rdatatype +import dns.serial +import dns.transaction +import dns.tsig +import dns.zone + + +class TransferError(dns.exception.DNSException): + """A zone transfer response got a non-zero rcode.""" + + def __init__(self, rcode): + message = f"Zone transfer error: {dns.rcode.to_text(rcode)}" + super().__init__(message) + self.rcode = rcode + + +class SerialWentBackwards(dns.exception.FormError): + """The current serial number is less than the serial we know.""" + + +class UseTCP(dns.exception.DNSException): + """This IXFR cannot be completed with UDP.""" + + +class Inbound: + """ + State machine for zone transfers. + """ + + def __init__( + self, + txn_manager: dns.transaction.TransactionManager, + rdtype: dns.rdatatype.RdataType = dns.rdatatype.AXFR, + serial: Optional[int] = None, + is_udp: bool = False, + ): + """Initialize an inbound zone transfer. + + *txn_manager* is a :py:class:`dns.transaction.TransactionManager`. + + *rdtype* can be `dns.rdatatype.AXFR` or `dns.rdatatype.IXFR` + + *serial* is the base serial number for IXFRs, and is required in + that case. + + *is_udp*, a ``bool`` indidicates if UDP is being used for this + XFR. + """ + self.txn_manager = txn_manager + self.txn: Optional[dns.transaction.Transaction] = None + self.rdtype = rdtype + if rdtype == dns.rdatatype.IXFR: + if serial is None: + raise ValueError("a starting serial must be supplied for IXFRs") + elif is_udp: + raise ValueError("is_udp specified for AXFR") + self.serial = serial + self.is_udp = is_udp + (_, _, self.origin) = txn_manager.origin_information() + self.soa_rdataset: Optional[dns.rdataset.Rdataset] = None + self.done = False + self.expecting_SOA = False + self.delete_mode = False + + def process_message(self, message: dns.message.Message) -> bool: + """Process one message in the transfer. 
+ + The message should have the same relativization as was specified when + the `dns.xfr.Inbound` was created. The message should also have been + created with `one_rr_per_rrset=True` because order matters. + + Returns `True` if the transfer is complete, and `False` otherwise. + """ + if self.txn is None: + replacement = self.rdtype == dns.rdatatype.AXFR + self.txn = self.txn_manager.writer(replacement) + rcode = message.rcode() + if rcode != dns.rcode.NOERROR: + raise TransferError(rcode) + # + # We don't require a question section, but if it is present is + # should be correct. + # + if len(message.question) > 0: + if message.question[0].name != self.origin: + raise dns.exception.FormError("wrong question name") + if message.question[0].rdtype != self.rdtype: + raise dns.exception.FormError("wrong question rdatatype") + answer_index = 0 + if self.soa_rdataset is None: + # + # This is the first message. We're expecting an SOA at + # the origin. + # + if not message.answer or message.answer[0].name != self.origin: + raise dns.exception.FormError("No answer or RRset not for zone origin") + rrset = message.answer[0] + rdataset = rrset + if rdataset.rdtype != dns.rdatatype.SOA: + raise dns.exception.FormError("first RRset is not an SOA") + answer_index = 1 + self.soa_rdataset = rdataset.copy() + if self.rdtype == dns.rdatatype.IXFR: + if self.soa_rdataset[0].serial == self.serial: + # + # We're already up-to-date. + # + self.done = True + elif dns.serial.Serial(self.soa_rdataset[0].serial) < self.serial: + # It went backwards! + raise SerialWentBackwards + else: + if self.is_udp and len(message.answer[answer_index:]) == 0: + # + # There are no more records, so this is the + # "truncated" response. Say to use TCP + # + raise UseTCP + # + # Note we're expecting another SOA so we can detect + # if this IXFR response is an AXFR-style response. + # + self.expecting_SOA = True + # + # Process the answer section (other than the initial SOA in + # the first message). 
+ # + for rrset in message.answer[answer_index:]: + name = rrset.name + rdataset = rrset + if self.done: + raise dns.exception.FormError("answers after final SOA") + assert self.txn is not None # for mypy + if rdataset.rdtype == dns.rdatatype.SOA and name == self.origin: + # + # Every time we see an origin SOA delete_mode inverts + # + if self.rdtype == dns.rdatatype.IXFR: + self.delete_mode = not self.delete_mode + # + # If this SOA Rdataset is equal to the first we saw + # then we're finished. If this is an IXFR we also + # check that we're seeing the record in the expected + # part of the response. + # + if rdataset == self.soa_rdataset and ( + self.rdtype == dns.rdatatype.AXFR + or (self.rdtype == dns.rdatatype.IXFR and self.delete_mode) + ): + # + # This is the final SOA + # + if self.expecting_SOA: + # We got an empty IXFR sequence! + raise dns.exception.FormError("empty IXFR sequence") + if ( + self.rdtype == dns.rdatatype.IXFR + and self.serial != rdataset[0].serial + ): + raise dns.exception.FormError("unexpected end of IXFR sequence") + self.txn.replace(name, rdataset) + self.txn.commit() + self.txn = None + self.done = True + else: + # + # This is not the final SOA + # + self.expecting_SOA = False + if self.rdtype == dns.rdatatype.IXFR: + if self.delete_mode: + # This is the start of an IXFR deletion set + if rdataset[0].serial != self.serial: + raise dns.exception.FormError( + "IXFR base serial mismatch" + ) + else: + # This is the start of an IXFR addition set + self.serial = rdataset[0].serial + self.txn.replace(name, rdataset) + else: + # We saw a non-final SOA for the origin in an AXFR. + raise dns.exception.FormError("unexpected origin SOA in AXFR") + continue + if self.expecting_SOA: + # + # We made an IXFR request and are expecting another + # SOA RR, but saw something else, so this must be an + # AXFR response. 
+ # + self.rdtype = dns.rdatatype.AXFR + self.expecting_SOA = False + self.delete_mode = False + self.txn.rollback() + self.txn = self.txn_manager.writer(True) + # + # Note we are falling through into the code below + # so whatever rdataset this was gets written. + # + # Add or remove the data + if self.delete_mode: + self.txn.delete_exact(name, rdataset) + else: + self.txn.add(name, rdataset) + if self.is_udp and not self.done: + # + # This is a UDP IXFR and we didn't get to done, and we didn't + # get the proper "truncated" response + # + raise dns.exception.FormError("unexpected end of UDP IXFR") + return self.done + + # + # Inbounds are context managers. + # + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.txn: + self.txn.rollback() + return False + + +def make_query( + txn_manager: dns.transaction.TransactionManager, + serial: Optional[int] = 0, + use_edns: Optional[Union[int, bool]] = None, + ednsflags: Optional[int] = None, + payload: Optional[int] = None, + request_payload: Optional[int] = None, + options: Optional[List[dns.edns.Option]] = None, + keyring: Any = None, + keyname: Optional[dns.name.Name] = None, + keyalgorithm: Union[dns.name.Name, str] = dns.tsig.default_algorithm, +) -> Tuple[dns.message.QueryMessage, Optional[int]]: + """Make an AXFR or IXFR query. + + *txn_manager* is a ``dns.transaction.TransactionManager``, typically a + ``dns.zone.Zone``. + + *serial* is an ``int`` or ``None``. If 0, then IXFR will be + attempted using the most recent serial number from the + *txn_manager*; it is the caller's responsibility to ensure there + are no write transactions active that could invalidate the + retrieved serial. If a serial cannot be determined, AXFR will be + forced. Other integer values are the starting serial to use. + ``None`` forces an AXFR. 
+ + Please see the documentation for :py:func:`dns.message.make_query` and + :py:func:`dns.message.Message.use_tsig` for details on the other parameters + to this function. + + Returns a `(query, serial)` tuple. + """ + (zone_origin, _, origin) = txn_manager.origin_information() + if zone_origin is None: + raise ValueError("no zone origin") + if serial is None: + rdtype = dns.rdatatype.AXFR + elif not isinstance(serial, int): + raise ValueError("serial is not an integer") + elif serial == 0: + with txn_manager.reader() as txn: + rdataset = txn.get(origin, "SOA") + if rdataset: + serial = rdataset[0].serial + rdtype = dns.rdatatype.IXFR + else: + serial = None + rdtype = dns.rdatatype.AXFR + elif serial > 0 and serial < 4294967296: + rdtype = dns.rdatatype.IXFR + else: + raise ValueError("serial out-of-range") + rdclass = txn_manager.get_class() + q = dns.message.make_query( + zone_origin, + rdtype, + rdclass, + use_edns, + False, + ednsflags, + payload, + request_payload, + options, + ) + if serial is not None: + rdata = dns.rdata.from_text(rdclass, "SOA", f". . {serial} 0 0 0 0") + rrset = q.find_rrset( + q.authority, zone_origin, rdclass, dns.rdatatype.SOA, create=True + ) + rrset.add(rdata, 0) + if keyring is not None: + q.use_tsig(keyring, keyname, algorithm=keyalgorithm) + return (q, serial) + + +def extract_serial_from_query(query: dns.message.Message) -> Optional[int]: + """Extract the SOA serial number from query if it is an IXFR and return + it, otherwise return None. + + *query* is a dns.message.QueryMessage that is an IXFR or AXFR request. + + Raises if the query is not an IXFR or AXFR, or if an IXFR doesn't have + an appropriate SOA RRset in the authority section. 
+ """ + if not isinstance(query, dns.message.QueryMessage): + raise ValueError("query not a QueryMessage") + question = query.question[0] + if question.rdtype == dns.rdatatype.AXFR: + return None + elif question.rdtype != dns.rdatatype.IXFR: + raise ValueError("query is not an AXFR or IXFR") + soa = query.find_rrset( + query.authority, question.name, question.rdclass, dns.rdatatype.SOA + ) + return soa[0].serial diff --git a/vllm/lib/python3.10/site-packages/dns/zone.py b/vllm/lib/python3.10/site-packages/dns/zone.py new file mode 100644 index 0000000000000000000000000000000000000000..844919e41f1162d44c276a329223df0e05e4dabc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/zone.py @@ -0,0 +1,1434 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +"""DNS Zones.""" + +import contextlib +import io +import os +import struct +from typing import ( + Any, + Callable, + Iterable, + Iterator, + List, + MutableMapping, + Optional, + Set, + Tuple, + Union, +) + +import dns.exception +import dns.grange +import dns.immutable +import dns.name +import dns.node +import dns.rdata +import dns.rdataclass +import dns.rdataset +import dns.rdatatype +import dns.rdtypes.ANY.SOA +import dns.rdtypes.ANY.ZONEMD +import dns.rrset +import dns.tokenizer +import dns.transaction +import dns.ttl +import dns.zonefile +from dns.zonetypes import DigestHashAlgorithm, DigestScheme, _digest_hashers + + +class BadZone(dns.exception.DNSException): + """The DNS zone is malformed.""" + + +class NoSOA(BadZone): + """The DNS zone has no SOA RR at its origin.""" + + +class NoNS(BadZone): + """The DNS zone has no NS RRset at its origin.""" + + +class UnknownOrigin(BadZone): + """The DNS zone's origin is unknown.""" + + +class UnsupportedDigestScheme(dns.exception.DNSException): + """The zone digest's scheme is unsupported.""" + + +class UnsupportedDigestHashAlgorithm(dns.exception.DNSException): + """The zone digest's origin is unsupported.""" + + +class NoDigest(dns.exception.DNSException): + """The DNS zone has no ZONEMD RRset at its origin.""" + + +class DigestVerificationFailure(dns.exception.DNSException): + """The ZONEMD digest failed to verify.""" + + +def _validate_name( + name: dns.name.Name, + origin: Optional[dns.name.Name], + relativize: bool, +) -> dns.name.Name: + # This name validation code is shared by Zone and Version + if origin is None: + # This should probably never happen as other code (e.g. + # _rr_line) will notice the lack of an origin before us, but + # we check just in case! 
+ raise KeyError("no zone origin is defined") + if name.is_absolute(): + if not name.is_subdomain(origin): + raise KeyError("name parameter must be a subdomain of the zone origin") + if relativize: + name = name.relativize(origin) + else: + # We have a relative name. Make sure that the derelativized name is + # not too long. + try: + abs_name = name.derelativize(origin) + except dns.name.NameTooLong: + # We map dns.name.NameTooLong to KeyError to be consistent with + # the other exceptions above. + raise KeyError("relative name too long for zone") + if not relativize: + # We have a relative name in a non-relative zone, so use the + # derelativized name. + name = abs_name + return name + + +class Zone(dns.transaction.TransactionManager): + """A DNS zone. + + A ``Zone`` is a mapping from names to nodes. The zone object may be + treated like a Python dictionary, e.g. ``zone[name]`` will retrieve + the node associated with that name. The *name* may be a + ``dns.name.Name object``, or it may be a string. In either case, + if the name is relative it is treated as relative to the origin of + the zone. + """ + + node_factory: Callable[[], dns.node.Node] = dns.node.Node + map_factory: Callable[[], MutableMapping[dns.name.Name, dns.node.Node]] = dict + writable_version_factory: Optional[Callable[[], "WritableVersion"]] = None + immutable_version_factory: Optional[Callable[[], "ImmutableVersion"]] = None + + __slots__ = ["rdclass", "origin", "nodes", "relativize"] + + def __init__( + self, + origin: Optional[Union[dns.name.Name, str]], + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + relativize: bool = True, + ): + """Initialize a zone object. + + *origin* is the origin of the zone. It may be a ``dns.name.Name``, + a ``str``, or ``None``. If ``None``, then the zone's origin will + be set by the first ``$ORIGIN`` line in a zone file. + + *rdclass*, an ``int``, the zone's rdata class; the default is class IN. 
*relativize*, a ``bool``, determines whether domain names are
+ if isinstance(name, str): + name = dns.name.from_text(name, None) + elif not isinstance(name, dns.name.Name): + raise KeyError("name parameter must be convertible to a DNS name") + return _validate_name(name, self.origin, self.relativize) + + def __getitem__(self, key): + key = self._validate_name(key) + return self.nodes[key] + + def __setitem__(self, key, value): + key = self._validate_name(key) + self.nodes[key] = value + + def __delitem__(self, key): + key = self._validate_name(key) + del self.nodes[key] + + def __iter__(self): + return self.nodes.__iter__() + + def keys(self): + return self.nodes.keys() + + def values(self): + return self.nodes.values() + + def items(self): + return self.nodes.items() + + def get(self, key): + key = self._validate_name(key) + return self.nodes.get(key) + + def __contains__(self, key): + key = self._validate_name(key) + return key in self.nodes + + def find_node( + self, name: Union[dns.name.Name, str], create: bool = False + ) -> dns.node.Node: + """Find a node in the zone, possibly creating it. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Raises ``KeyError`` if the name is not known and create was + not specified, or if the name was not a subdomain of the origin. + + Returns a ``dns.node.Node``. + """ + + name = self._validate_name(name) + node = self.nodes.get(name) + if node is None: + if not create: + raise KeyError + node = self.node_factory() + self.nodes[name] = node + return node + + def get_node( + self, name: Union[dns.name.Name, str], create: bool = False + ) -> Optional[dns.node.Node]: + """Get a node in the zone, possibly creating it. 
+ + This method is like ``find_node()``, except it returns None instead + of raising an exception if the node does not exist and creation + has not been requested. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Returns a ``dns.node.Node`` or ``None``. + """ + + try: + node = self.find_node(name, create) + except KeyError: + node = None + return node + + def delete_node(self, name: Union[dns.name.Name, str]) -> None: + """Delete the specified node if it exists. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + It is not an error if the node does not exist. + """ + + name = self._validate_name(name) + if name in self.nodes: + del self.nodes[name] + + def find_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + create: bool = False, + ) -> dns.rdataset.Rdataset: + """Look for an rdataset with the specified name and type in the zone, + and return an rdataset encapsulating it. + + The rdataset returned is not a copy; changes to it will change + the zone. + + KeyError is raised if the name or type are not found. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdatatype.RdataType`` or ``str`` the covered type. 
+ Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Raises ``KeyError`` if the name is not known and create was + not specified, or if the name was not a subdomain of the origin. + + Returns a ``dns.rdataset.Rdataset``. + """ + + name = self._validate_name(name) + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + node = self.find_node(name, create) + return node.find_rdataset(self.rdclass, rdtype, covers, create) + + def get_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + create: bool = False, + ) -> Optional[dns.rdataset.Rdataset]: + """Look for an rdataset with the specified name and type in the zone. + + This method is like ``find_rdataset()``, except it returns None instead + of raising an exception if the rdataset does not exist and creation + has not been requested. + + The rdataset returned is not a copy; changes to it will change + the zone. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type. 
+ Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Raises ``KeyError`` if the name is not known and create was + not specified, or if the name was not a subdomain of the origin. + + Returns a ``dns.rdataset.Rdataset`` or ``None``. + """ + + try: + rdataset = self.find_rdataset(name, rdtype, covers, create) + except KeyError: + rdataset = None + return rdataset + + def delete_rdataset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> None: + """Delete the rdataset matching *rdtype* and *covers*, if it + exists at the node specified by *name*. + + It is not an error if the node does not exist, or if there is no matching + rdataset at the node. + + If the node has no rdatasets after the deletion, it will itself be deleted. + + *name*: the name of the node to find. The value may be a ``dns.name.Name`` or a + ``str``. If absolute, the name must be a subdomain of the zone's origin. If + ``zone.relativize`` is ``True``, then the name will be relativized. + + *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdatatype.RdataType`` or ``str`` or ``None``, the covered + type. Usually this value is ``dns.rdatatype.NONE``, but if the rdtype is + ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, then the covers value will be + the rdata type the SIG/RRSIG covers. 
The library treats the SIG and RRSIG types + as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This + makes RRSIGs much easier to work with than if RRSIGs covering different rdata + types were aggregated into a single RRSIG rdataset. + """ + + name = self._validate_name(name) + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + node = self.get_node(name) + if node is not None: + node.delete_rdataset(self.rdclass, rdtype, covers) + if len(node) == 0: + self.delete_node(name) + + def replace_rdataset( + self, name: Union[dns.name.Name, str], replacement: dns.rdataset.Rdataset + ) -> None: + """Replace an rdataset at name. + + It is not an error if there is no rdataset matching I{replacement}. + + Ownership of the *replacement* object is transferred to the zone; + in other words, this method does not store a copy of *replacement* + at the node, it stores *replacement* itself. + + If the node does not exist, it is created. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *replacement*, a ``dns.rdataset.Rdataset``, the replacement rdataset. + """ + + if replacement.rdclass != self.rdclass: + raise ValueError("replacement.rdclass != zone.rdclass") + node = self.find_node(name, True) + node.replace_rdataset(replacement) + + def find_rrset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> dns.rrset.RRset: + """Look for an rdataset with the specified name and type in the zone, + and return an RRset encapsulating it. + + This method is less efficient than the similar + ``find_rdataset()`` because it creates an RRset instead of + returning the matching rdataset. 
It may be more convenient + for some uses since it returns an object which binds the owner + name to the rdataset. + + This method may not be used to create new nodes or rdatasets; + use ``find_rdataset`` instead. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type. + Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Raises ``KeyError`` if the name is not known and create was + not specified, or if the name was not a subdomain of the origin. + + Returns a ``dns.rrset.RRset`` or ``None``. + """ + + vname = self._validate_name(name) + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + rdataset = self.nodes[vname].find_rdataset(self.rdclass, rdtype, covers) + rrset = dns.rrset.RRset(vname, self.rdclass, rdtype, covers) + rrset.update(rdataset) + return rrset + + def get_rrset( + self, + name: Union[dns.name.Name, str], + rdtype: Union[dns.rdatatype.RdataType, str], + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> Optional[dns.rrset.RRset]: + """Look for an rdataset with the specified name and type in the zone, + and return an RRset encapsulating it. 
+ + This method is less efficient than the similar ``get_rdataset()`` + because it creates an RRset instead of returning the matching + rdataset. It may be more convenient for some uses since it + returns an object which binds the owner name to the rdataset. + + This method may not be used to create new nodes or rdatasets; + use ``get_rdataset()`` instead. + + *name*: the name of the node to find. + The value may be a ``dns.name.Name`` or a ``str``. If absolute, the + name must be a subdomain of the zone's origin. If ``zone.relativize`` + is ``True``, then the name will be relativized. + + *rdtype*, a ``dns.rdataset.Rdataset`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdataset.Rdataset`` or ``str``, the covered type. + Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. + + *create*, a ``bool``. If true, the node will be created if it does + not exist. + + Returns a ``dns.rrset.RRset`` or ``None``. + """ + + try: + rrset = self.find_rrset(name, rdtype, covers) + except KeyError: + rrset = None + return rrset + + def iterate_rdatasets( + self, + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.ANY, + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> Iterator[Tuple[dns.name.Name, dns.rdataset.Rdataset]]: + """Return a generator which yields (name, rdataset) tuples for + all rdatasets in the zone which have the specified *rdtype* + and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default, + then all rdatasets will be matched. + + *rdtype*, a ``dns.rdataset.Rdataset`` or ``str``, the rdata type desired. 
+ + *covers*, a ``dns.rdataset.Rdataset`` or ``str``, the covered type. + Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. + """ + + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + for name, node in self.items(): + for rds in node: + if rdtype == dns.rdatatype.ANY or ( + rds.rdtype == rdtype and rds.covers == covers + ): + yield (name, rds) + + def iterate_rdatas( + self, + rdtype: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.ANY, + covers: Union[dns.rdatatype.RdataType, str] = dns.rdatatype.NONE, + ) -> Iterator[Tuple[dns.name.Name, int, dns.rdata.Rdata]]: + """Return a generator which yields (name, ttl, rdata) tuples for + all rdatas in the zone which have the specified *rdtype* + and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default, + then all rdatas will be matched. + + *rdtype*, a ``dns.rdataset.Rdataset`` or ``str``, the rdata type desired. + + *covers*, a ``dns.rdataset.Rdataset`` or ``str``, the covered type. + Usually this value is ``dns.rdatatype.NONE``, but if the + rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, + then the covers value will be the rdata type the SIG/RRSIG + covers. The library treats the SIG and RRSIG types as if they + were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). + This makes RRSIGs much easier to work with than if RRSIGs + covering different rdata types were aggregated into a single + RRSIG rdataset. 
+ """ + + rdtype = dns.rdatatype.RdataType.make(rdtype) + covers = dns.rdatatype.RdataType.make(covers) + for name, node in self.items(): + for rds in node: + if rdtype == dns.rdatatype.ANY or ( + rds.rdtype == rdtype and rds.covers == covers + ): + for rdata in rds: + yield (name, rds.ttl, rdata) + + def to_file( + self, + f: Any, + sorted: bool = True, + relativize: bool = True, + nl: Optional[str] = None, + want_comments: bool = False, + want_origin: bool = False, + ) -> None: + """Write a zone to a file. + + *f*, a file or `str`. If *f* is a string, it is treated + as the name of a file to open. + + *sorted*, a ``bool``. If True, the default, then the file + will be written with the names sorted in DNSSEC order from + least to greatest. Otherwise the names will be written in + whatever order they happen to have in the zone's dictionary. + + *relativize*, a ``bool``. If True, the default, then domain + names in the output will be relativized to the zone's origin + if possible. + + *nl*, a ``str`` or None. The end of line string. If not + ``None``, the output will use the platform's native + end-of-line marker (i.e. LF on POSIX, CRLF on Windows). + + *want_comments*, a ``bool``. If ``True``, emit end-of-line comments + as part of writing the file. If ``False``, the default, do not + emit them. + + *want_origin*, a ``bool``. If ``True``, emit a $ORIGIN line at + the start of the file. If ``False``, the default, do not emit + one. 
+ """ + + if isinstance(f, str): + cm: contextlib.AbstractContextManager = open(f, "wb") + else: + cm = contextlib.nullcontext(f) + with cm as f: + # must be in this way, f.encoding may contain None, or even + # attribute may not be there + file_enc = getattr(f, "encoding", None) + if file_enc is None: + file_enc = "utf-8" + + if nl is None: + # binary mode, '\n' is not enough + nl_b = os.linesep.encode(file_enc) + nl = "\n" + elif isinstance(nl, str): + nl_b = nl.encode(file_enc) + else: + nl_b = nl + nl = nl.decode() + + if want_origin: + assert self.origin is not None + l = "$ORIGIN " + self.origin.to_text() + l_b = l.encode(file_enc) + try: + f.write(l_b) + f.write(nl_b) + except TypeError: # textual mode + f.write(l) + f.write(nl) + + if sorted: + names = list(self.keys()) + names.sort() + else: + names = self.keys() + for n in names: + l = self[n].to_text( + n, + origin=self.origin, + relativize=relativize, + want_comments=want_comments, + ) + l_b = l.encode(file_enc) + + try: + f.write(l_b) + f.write(nl_b) + except TypeError: # textual mode + f.write(l) + f.write(nl) + + def to_text( + self, + sorted: bool = True, + relativize: bool = True, + nl: Optional[str] = None, + want_comments: bool = False, + want_origin: bool = False, + ) -> str: + """Return a zone's text as though it were written to a file. + + *sorted*, a ``bool``. If True, the default, then the file + will be written with the names sorted in DNSSEC order from + least to greatest. Otherwise the names will be written in + whatever order they happen to have in the zone's dictionary. + + *relativize*, a ``bool``. If True, the default, then domain + names in the output will be relativized to the zone's origin + if possible. + + *nl*, a ``str`` or None. The end of line string. If not + ``None``, the output will use the platform's native + end-of-line marker (i.e. LF on POSIX, CRLF on Windows). + + *want_comments*, a ``bool``. If ``True``, emit end-of-line comments + as part of writing the file. 
If ``False``, the default, do not + emit them. + + *want_origin*, a ``bool``. If ``True``, emit a $ORIGIN line at + the start of the output. If ``False``, the default, do not emit + one. + + Returns a ``str``. + """ + temp_buffer = io.StringIO() + self.to_file(temp_buffer, sorted, relativize, nl, want_comments, want_origin) + return_value = temp_buffer.getvalue() + temp_buffer.close() + return return_value + + def check_origin(self) -> None: + """Do some simple checking of the zone's origin. + + Raises ``dns.zone.NoSOA`` if there is no SOA RRset. + + Raises ``dns.zone.NoNS`` if there is no NS RRset. + + Raises ``KeyError`` if there is no origin node. + """ + if self.relativize: + name = dns.name.empty + else: + assert self.origin is not None + name = self.origin + if self.get_rdataset(name, dns.rdatatype.SOA) is None: + raise NoSOA + if self.get_rdataset(name, dns.rdatatype.NS) is None: + raise NoNS + + def get_soa( + self, txn: Optional[dns.transaction.Transaction] = None + ) -> dns.rdtypes.ANY.SOA.SOA: + """Get the zone SOA rdata. + + Raises ``dns.zone.NoSOA`` if there is no SOA RRset. + + Returns a ``dns.rdtypes.ANY.SOA.SOA`` Rdata. + """ + if self.relativize: + origin_name = dns.name.empty + else: + if self.origin is None: + # get_soa() has been called very early, and there must not be + # an SOA if there is no origin. 
+ raise NoSOA + origin_name = self.origin + soa: Optional[dns.rdataset.Rdataset] + if txn: + soa = txn.get(origin_name, dns.rdatatype.SOA) + else: + soa = self.get_rdataset(origin_name, dns.rdatatype.SOA) + if soa is None: + raise NoSOA + return soa[0] + + def _compute_digest( + self, + hash_algorithm: DigestHashAlgorithm, + scheme: DigestScheme = DigestScheme.SIMPLE, + ) -> bytes: + hashinfo = _digest_hashers.get(hash_algorithm) + if not hashinfo: + raise UnsupportedDigestHashAlgorithm + if scheme != DigestScheme.SIMPLE: + raise UnsupportedDigestScheme + + if self.relativize: + origin_name = dns.name.empty + else: + assert self.origin is not None + origin_name = self.origin + hasher = hashinfo() + for name, node in sorted(self.items()): + rrnamebuf = name.to_digestable(self.origin) + for rdataset in sorted(node, key=lambda rds: (rds.rdtype, rds.covers)): + if name == origin_name and dns.rdatatype.ZONEMD in ( + rdataset.rdtype, + rdataset.covers, + ): + continue + rrfixed = struct.pack( + "!HHI", rdataset.rdtype, rdataset.rdclass, rdataset.ttl + ) + rdatas = [rdata.to_digestable(self.origin) for rdata in rdataset] + for rdata in sorted(rdatas): + rrlen = struct.pack("!H", len(rdata)) + hasher.update(rrnamebuf + rrfixed + rrlen + rdata) + return hasher.digest() + + def compute_digest( + self, + hash_algorithm: DigestHashAlgorithm, + scheme: DigestScheme = DigestScheme.SIMPLE, + ) -> dns.rdtypes.ANY.ZONEMD.ZONEMD: + serial = self.get_soa().serial + digest = self._compute_digest(hash_algorithm, scheme) + return dns.rdtypes.ANY.ZONEMD.ZONEMD( + self.rdclass, dns.rdatatype.ZONEMD, serial, scheme, hash_algorithm, digest + ) + + def verify_digest( + self, zonemd: Optional[dns.rdtypes.ANY.ZONEMD.ZONEMD] = None + ) -> None: + digests: Union[dns.rdataset.Rdataset, List[dns.rdtypes.ANY.ZONEMD.ZONEMD]] + if zonemd: + digests = [zonemd] + else: + assert self.origin is not None + rds = self.get_rdataset(self.origin, dns.rdatatype.ZONEMD) + if rds is None: + raise NoDigest + 
digests = rds + for digest in digests: + try: + computed = self._compute_digest(digest.hash_algorithm, digest.scheme) + if computed == digest.digest: + return + except Exception: + pass + raise DigestVerificationFailure + + # TransactionManager methods + + def reader(self) -> "Transaction": + return Transaction(self, False, Version(self, 1, self.nodes, self.origin)) + + def writer(self, replacement: bool = False) -> "Transaction": + txn = Transaction(self, replacement) + txn._setup_version() + return txn + + def origin_information( + self, + ) -> Tuple[Optional[dns.name.Name], bool, Optional[dns.name.Name]]: + effective: Optional[dns.name.Name] + if self.relativize: + effective = dns.name.empty + else: + effective = self.origin + return (self.origin, self.relativize, effective) + + def get_class(self): + return self.rdclass + + # Transaction methods + + def _end_read(self, txn): + pass + + def _end_write(self, txn): + pass + + def _commit_version(self, _, version, origin): + self.nodes = version.nodes + if self.origin is None: + self.origin = origin + + def _get_next_version_id(self): + # Versions are ephemeral and all have id 1 + return 1 + + +# These classes used to be in dns.versioned, but have moved here so we can use +# the copy-on-write transaction mechanism for both kinds of zones. In a +# regular zone, the version only exists during the transaction, and the nodes +# are regular dns.node.Nodes. + +# A node with a version id. 
+ + +class VersionedNode(dns.node.Node): # lgtm[py/missing-equals] + __slots__ = ["id"] + + def __init__(self): + super().__init__() + # A proper id will get set by the Version + self.id = 0 + + +@dns.immutable.immutable +class ImmutableVersionedNode(VersionedNode): + def __init__(self, node): + super().__init__() + self.id = node.id + self.rdatasets = tuple( + [dns.rdataset.ImmutableRdataset(rds) for rds in node.rdatasets] + ) + + def find_rdataset( + self, + rdclass: dns.rdataclass.RdataClass, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType = dns.rdatatype.NONE, + create: bool = False, + ) -> dns.rdataset.Rdataset: + if create: + raise TypeError("immutable") + return super().find_rdataset(rdclass, rdtype, covers, False) + + def get_rdataset( + self, + rdclass: dns.rdataclass.RdataClass, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType = dns.rdatatype.NONE, + create: bool = False, + ) -> Optional[dns.rdataset.Rdataset]: + if create: + raise TypeError("immutable") + return super().get_rdataset(rdclass, rdtype, covers, False) + + def delete_rdataset( + self, + rdclass: dns.rdataclass.RdataClass, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType = dns.rdatatype.NONE, + ) -> None: + raise TypeError("immutable") + + def replace_rdataset(self, replacement: dns.rdataset.Rdataset) -> None: + raise TypeError("immutable") + + def is_immutable(self) -> bool: + return True + + +class Version: + def __init__( + self, + zone: Zone, + id: int, + nodes: Optional[MutableMapping[dns.name.Name, dns.node.Node]] = None, + origin: Optional[dns.name.Name] = None, + ): + self.zone = zone + self.id = id + if nodes is not None: + self.nodes = nodes + else: + self.nodes = zone.map_factory() + self.origin = origin + + def _validate_name(self, name: dns.name.Name) -> dns.name.Name: + return _validate_name(name, self.origin, self.zone.relativize) + + def get_node(self, name: dns.name.Name) -> Optional[dns.node.Node]: + name = 
self._validate_name(name) + return self.nodes.get(name) + + def get_rdataset( + self, + name: dns.name.Name, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType, + ) -> Optional[dns.rdataset.Rdataset]: + node = self.get_node(name) + if node is None: + return None + return node.get_rdataset(self.zone.rdclass, rdtype, covers) + + def keys(self): + return self.nodes.keys() + + def items(self): + return self.nodes.items() + + +class WritableVersion(Version): + def __init__(self, zone: Zone, replacement: bool = False): + # The zone._versions_lock must be held by our caller in a versioned + # zone. + id = zone._get_next_version_id() + super().__init__(zone, id) + if not replacement: + # We copy the map, because that gives us a simple and thread-safe + # way of doing versions, and we have a garbage collector to help + # us. We only make new node objects if we actually change the + # node. + self.nodes.update(zone.nodes) + # We have to copy the zone origin as it may be None in the first + # version, and we don't want to mutate the zone until we commit. + self.origin = zone.origin + self.changed: Set[dns.name.Name] = set() + + def _maybe_cow(self, name: dns.name.Name) -> dns.node.Node: + name = self._validate_name(name) + node = self.nodes.get(name) + if node is None or name not in self.changed: + new_node = self.zone.node_factory() + if hasattr(new_node, "id"): + # We keep doing this for backwards compatibility, as earlier + # code used new_node.id != self.id for the "do we need to CoW?" + # test. Now we use the changed set as this works with both + # regular zones and versioned zones. + # + # We ignore the mypy error as this is safe but it doesn't see it. + new_node.id = self.id # type: ignore + if node is not None: + # moo! copy on write! 
+ new_node.rdatasets.extend(node.rdatasets) + self.nodes[name] = new_node + self.changed.add(name) + return new_node + else: + return node + + def delete_node(self, name: dns.name.Name) -> None: + name = self._validate_name(name) + if name in self.nodes: + del self.nodes[name] + self.changed.add(name) + + def put_rdataset( + self, name: dns.name.Name, rdataset: dns.rdataset.Rdataset + ) -> None: + node = self._maybe_cow(name) + node.replace_rdataset(rdataset) + + def delete_rdataset( + self, + name: dns.name.Name, + rdtype: dns.rdatatype.RdataType, + covers: dns.rdatatype.RdataType, + ) -> None: + node = self._maybe_cow(name) + node.delete_rdataset(self.zone.rdclass, rdtype, covers) + if len(node) == 0: + del self.nodes[name] + + +@dns.immutable.immutable +class ImmutableVersion(Version): + def __init__(self, version: WritableVersion): + # We tell super() that it's a replacement as we don't want it + # to copy the nodes, as we're about to do that with an + # immutable Dict. + super().__init__(version.zone, True) + # set the right id! + self.id = version.id + # keep the origin + self.origin = version.origin + # Make changed nodes immutable + for name in version.changed: + node = version.nodes.get(name) + # it might not exist if we deleted it in the version + if node: + version.nodes[name] = ImmutableVersionedNode(node) + # We're changing the type of the nodes dictionary here on purpose, so + # we ignore the mypy error. 
+ self.nodes = dns.immutable.Dict( + version.nodes, True, self.zone.map_factory + ) # type: ignore + + +class Transaction(dns.transaction.Transaction): + def __init__(self, zone, replacement, version=None, make_immutable=False): + read_only = version is not None + super().__init__(zone, replacement, read_only) + self.version = version + self.make_immutable = make_immutable + + @property + def zone(self): + return self.manager + + def _setup_version(self): + assert self.version is None + factory = self.manager.writable_version_factory + if factory is None: + factory = WritableVersion + self.version = factory(self.zone, self.replacement) + + def _get_rdataset(self, name, rdtype, covers): + return self.version.get_rdataset(name, rdtype, covers) + + def _put_rdataset(self, name, rdataset): + assert not self.read_only + self.version.put_rdataset(name, rdataset) + + def _delete_name(self, name): + assert not self.read_only + self.version.delete_node(name) + + def _delete_rdataset(self, name, rdtype, covers): + assert not self.read_only + self.version.delete_rdataset(name, rdtype, covers) + + def _name_exists(self, name): + return self.version.get_node(name) is not None + + def _changed(self): + if self.read_only: + return False + else: + return len(self.version.changed) > 0 + + def _end_transaction(self, commit): + if self.read_only: + self.zone._end_read(self) + elif commit and len(self.version.changed) > 0: + if self.make_immutable: + factory = self.manager.immutable_version_factory + if factory is None: + factory = ImmutableVersion + version = factory(self.version) + else: + version = self.version + self.zone._commit_version(self, version, self.version.origin) + else: + # rollback + self.zone._end_write(self) + + def _set_origin(self, origin): + if self.version.origin is None: + self.version.origin = origin + + def _iterate_rdatasets(self): + for name, node in self.version.items(): + for rdataset in node: + yield (name, rdataset) + + def _iterate_names(self): + return 
self.version.keys() + + def _get_node(self, name): + return self.version.get_node(name) + + def _origin_information(self): + (absolute, relativize, effective) = self.manager.origin_information() + if absolute is None and self.version.origin is not None: + # No origin has been committed yet, but we've learned one as part of + # this txn. Use it. + absolute = self.version.origin + if relativize: + effective = dns.name.empty + else: + effective = absolute + return (absolute, relativize, effective) + + +def _from_text( + text: Any, + origin: Optional[Union[dns.name.Name, str]] = None, + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + relativize: bool = True, + zone_factory: Any = Zone, + filename: Optional[str] = None, + allow_include: bool = False, + check_origin: bool = True, + idna_codec: Optional[dns.name.IDNACodec] = None, + allow_directives: Union[bool, Iterable[str]] = True, +) -> Zone: + # See the comments for the public APIs from_text() and from_file() for + # details. + + # 'text' can also be a file, but we don't publish that fact + # since it's an implementation detail. The official file + # interface is from_file(). + + if filename is None: + filename = "" + zone = zone_factory(origin, rdclass, relativize=relativize) + with zone.writer(True) as txn: + tok = dns.tokenizer.Tokenizer(text, filename, idna_codec=idna_codec) + reader = dns.zonefile.Reader( + tok, + rdclass, + txn, + allow_include=allow_include, + allow_directives=allow_directives, + ) + try: + reader.read() + except dns.zonefile.UnknownOrigin: + # for backwards compatibility + raise dns.zone.UnknownOrigin + # Now that we're done reading, do some basic checking of the zone. 
+ if check_origin: + zone.check_origin() + return zone + + +def from_text( + text: str, + origin: Optional[Union[dns.name.Name, str]] = None, + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + relativize: bool = True, + zone_factory: Any = Zone, + filename: Optional[str] = None, + allow_include: bool = False, + check_origin: bool = True, + idna_codec: Optional[dns.name.IDNACodec] = None, + allow_directives: Union[bool, Iterable[str]] = True, +) -> Zone: + """Build a zone object from a zone file format string. + + *text*, a ``str``, the zone file format input. + + *origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin + of the zone; if not specified, the first ``$ORIGIN`` statement in the + zone file will determine the origin of the zone. + + *rdclass*, a ``dns.rdataclass.RdataClass``, the zone's rdata class; the default is + class IN. + + *relativize*, a ``bool``, determine's whether domain names are + relativized to the zone's origin. The default is ``True``. + + *zone_factory*, the zone factory to use or ``None``. If ``None``, then + ``dns.zone.Zone`` will be used. The value may be any class or callable + that returns a subclass of ``dns.zone.Zone``. + + *filename*, a ``str`` or ``None``, the filename to emit when + describing where an error occurred; the default is ``''``. + + *allow_include*, a ``bool``. If ``True``, the default, then ``$INCLUDE`` + directives are permitted. If ``False``, then encoutering a ``$INCLUDE`` + will raise a ``SyntaxError`` exception. + + *check_origin*, a ``bool``. If ``True``, the default, then sanity + checks of the origin node will be made by calling the zone's + ``check_origin()`` method. + + *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA + encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder + is used. + + *allow_directives*, a ``bool`` or an iterable of `str`. 
If ``True``, the default, + then directives are permitted, and the *allow_include* parameter controls whether + ``$INCLUDE`` is permitted. If ``False`` or an empty iterable, then no directive + processing is done and any directive-like text will be treated as a regular owner + name. If a non-empty iterable, then only the listed directives (including the + ``$``) are allowed. + + Raises ``dns.zone.NoSOA`` if there is no SOA RRset. + + Raises ``dns.zone.NoNS`` if there is no NS RRset. + + Raises ``KeyError`` if there is no origin node. + + Returns a subclass of ``dns.zone.Zone``. + """ + return _from_text( + text, + origin, + rdclass, + relativize, + zone_factory, + filename, + allow_include, + check_origin, + idna_codec, + allow_directives, + ) + + +def from_file( + f: Any, + origin: Optional[Union[dns.name.Name, str]] = None, + rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN, + relativize: bool = True, + zone_factory: Any = Zone, + filename: Optional[str] = None, + allow_include: bool = True, + check_origin: bool = True, + idna_codec: Optional[dns.name.IDNACodec] = None, + allow_directives: Union[bool, Iterable[str]] = True, +) -> Zone: + """Read a zone file and build a zone object. + + *f*, a file or ``str``. If *f* is a string, it is treated + as the name of a file to open. + + *origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin + of the zone; if not specified, the first ``$ORIGIN`` statement in the + zone file will determine the origin of the zone. + + *rdclass*, an ``int``, the zone's rdata class; the default is class IN. + + *relativize*, a ``bool``, determine's whether domain names are + relativized to the zone's origin. The default is ``True``. + + *zone_factory*, the zone factory to use or ``None``. If ``None``, then + ``dns.zone.Zone`` will be used. The value may be any class or callable + that returns a subclass of ``dns.zone.Zone``. 
+ + *filename*, a ``str`` or ``None``, the filename to emit when + describing where an error occurred; the default is ``''``. + + *allow_include*, a ``bool``. If ``True``, the default, then ``$INCLUDE`` + directives are permitted. If ``False``, then encoutering a ``$INCLUDE`` + will raise a ``SyntaxError`` exception. + + *check_origin*, a ``bool``. If ``True``, the default, then sanity + checks of the origin node will be made by calling the zone's + ``check_origin()`` method. + + *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA + encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder + is used. + + *allow_directives*, a ``bool`` or an iterable of `str`. If ``True``, the default, + then directives are permitted, and the *allow_include* parameter controls whether + ``$INCLUDE`` is permitted. If ``False`` or an empty iterable, then no directive + processing is done and any directive-like text will be treated as a regular owner + name. If a non-empty iterable, then only the listed directives (including the + ``$``) are allowed. + + Raises ``dns.zone.NoSOA`` if there is no SOA RRset. + + Raises ``dns.zone.NoNS`` if there is no NS RRset. + + Raises ``KeyError`` if there is no origin node. + + Returns a subclass of ``dns.zone.Zone``. + """ + + if isinstance(f, str): + if filename is None: + filename = f + cm: contextlib.AbstractContextManager = open(f) + else: + cm = contextlib.nullcontext(f) + with cm as f: + return _from_text( + f, + origin, + rdclass, + relativize, + zone_factory, + filename, + allow_include, + check_origin, + idna_codec, + allow_directives, + ) + assert False # make mypy happy lgtm[py/unreachable-statement] + + +def from_xfr( + xfr: Any, + zone_factory: Any = Zone, + relativize: bool = True, + check_origin: bool = True, +) -> Zone: + """Convert the output of a zone transfer generator into a zone object. + + *xfr*, a generator of ``dns.message.Message`` objects, typically + ``dns.query.xfr()``. 
+ + *relativize*, a ``bool``, determine's whether domain names are + relativized to the zone's origin. The default is ``True``. + It is essential that the relativize setting matches the one specified + to the generator. + + *check_origin*, a ``bool``. If ``True``, the default, then sanity + checks of the origin node will be made by calling the zone's + ``check_origin()`` method. + + Raises ``dns.zone.NoSOA`` if there is no SOA RRset. + + Raises ``dns.zone.NoNS`` if there is no NS RRset. + + Raises ``KeyError`` if there is no origin node. + + Raises ``ValueError`` if no messages are yielded by the generator. + + Returns a subclass of ``dns.zone.Zone``. + """ + + z = None + for r in xfr: + if z is None: + if relativize: + origin = r.origin + else: + origin = r.answer[0].name + rdclass = r.answer[0].rdclass + z = zone_factory(origin, rdclass, relativize=relativize) + for rrset in r.answer: + znode = z.nodes.get(rrset.name) + if not znode: + znode = z.node_factory() + z.nodes[rrset.name] = znode + zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True) + zrds.update_ttl(rrset.ttl) + for rd in rrset: + zrds.add(rd) + if z is None: + raise ValueError("empty transfer") + if check_origin: + z.check_origin() + return z diff --git a/vllm/lib/python3.10/site-packages/dns/zonefile.py b/vllm/lib/python3.10/site-packages/dns/zonefile.py new file mode 100644 index 0000000000000000000000000000000000000000..d74510b29f008c0ab03e16a5ac2bd211c81fde5f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/dns/zonefile.py @@ -0,0 +1,744 @@ +# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license + +# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose with or without fee is hereby granted, +# provided that the above copyright notice and this permission notice +# appear in all copies. 
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""DNS Zones."""

import re
import sys
from typing import Any, Iterable, List, Optional, Set, Tuple, Union

import dns.exception
import dns.grange
import dns.name
import dns.node
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl


class UnknownOrigin(dns.exception.DNSException):
    """Unknown origin"""


class CNAMEAndOtherData(dns.exception.DNSException):
    """A node has a CNAME and other data"""


def _check_cname_and_other_data(txn, name, rdataset):
    # Transaction "check put" hook: enforce the DNS rule that a CNAME node
    # may not also hold regular data (and vice versa).  Raises
    # CNAMEAndOtherData on violation; returns None when the put is allowed.
    rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
    node = txn.get_node(name)
    if node is None:
        # empty nodes are neutral.
        return
    node_kind = node.classify()
    if (
        node_kind == dns.node.NodeKind.CNAME
        and rdataset_kind == dns.node.NodeKind.REGULAR
    ):
        raise CNAMEAndOtherData("rdataset type is not compatible with a CNAME node")
    elif (
        node_kind == dns.node.NodeKind.REGULAR
        and rdataset_kind == dns.node.NodeKind.CNAME
    ):
        raise CNAMEAndOtherData(
            "CNAME rdataset is not compatible with a regular data node"
        )
    # Otherwise at least one of the node and the rdataset is neutral, so
    # adding the rdataset is ok


# Parser state saved/restored around a $INCLUDE (see Reader.read()).
SavedStateType = Tuple[
    dns.tokenizer.Tokenizer,
    Optional[dns.name.Name],  # current_origin
    Optional[dns.name.Name],  # last_name
    Optional[Any],  # current_file
    int,  # last_ttl
    bool,  # last_ttl_known
    int,  # default_ttl
    bool,  # default_ttl_known
]


def _upper_dollarize(s):
    # Normalize a directive name to canonical "$UPPERCASE" form, adding the
    # leading "$" if the caller omitted it (e.g. "ttl" -> "$TTL").
    s = s.upper()
    if not s.startswith("$"):
        s = "$" + s
    return s


class Reader:
    """Read a DNS zone file into a transaction."""

    def __init__(
        self,
        tok: dns.tokenizer.Tokenizer,
        rdclass: dns.rdataclass.RdataClass,
        txn: dns.transaction.Transaction,
        allow_include: bool = False,
        allow_directives: Union[bool, Iterable[str]] = True,
        force_name: Optional[dns.name.Name] = None,
        force_ttl: Optional[int] = None,
        force_rdclass: Optional[dns.rdataclass.RdataClass] = None,
        force_rdtype: Optional[dns.rdatatype.RdataType] = None,
        default_ttl: Optional[int] = None,
    ):
        # The zone origin and relativization policy come from the
        # transaction's manager, not from the caller.
        self.tok = tok
        (self.zone_origin, self.relativize, _) = txn.manager.origin_information()
        self.current_origin = self.zone_origin
        self.last_ttl = 0
        self.last_ttl_known = False
        if force_ttl is not None:
            default_ttl = force_ttl
        if default_ttl is None:
            self.default_ttl = 0
            self.default_ttl_known = False
        else:
            self.default_ttl = default_ttl
            self.default_ttl_known = True
        self.last_name = self.current_origin
        self.zone_rdclass = rdclass
        self.txn = txn
        self.saved_state: List[SavedStateType] = []
        self.current_file: Optional[Any] = None
        self.allowed_directives: Set[str]
        if allow_directives is True:
            self.allowed_directives = {"$GENERATE", "$ORIGIN", "$TTL"}
            if allow_include:
                self.allowed_directives.add("$INCLUDE")
        elif allow_directives is False:
            # allow_include was ignored in earlier releases if allow_directives was
            # False, so we continue that.
            self.allowed_directives = set()
        else:
            # Note that if directives are explicitly specified, then allow_include
            # is ignored.
            self.allowed_directives = set(_upper_dollarize(d) for d in allow_directives)
        self.force_name = force_name
        self.force_ttl = force_ttl
        self.force_rdclass = force_rdclass
        self.force_rdtype = force_rdtype
        self.txn.check_put_rdataset(_check_cname_and_other_data)

    def _eat_line(self):
        # Consume and discard tokens through the next EOL (or EOF); used to
        # skip records that are out of zone.
        while 1:
            token = self.tok.get()
            if token.is_eol_or_eof():
                break

    def _get_identifier(self):
        # Fetch the next token, requiring it to be an identifier.
        token = self.tok.get()
        if not token.is_identifier():
            raise dns.exception.SyntaxError
        return token

    def _rr_line(self):
        """Process one line from a DNS zone file."""
        token = None
        # Name
        if self.force_name is not None:
            name = self.force_name
        else:
            if self.current_origin is None:
                raise UnknownOrigin
            token = self.tok.get(want_leading=True)
            if not token.is_whitespace():
                self.last_name = self.tok.as_name(token, self.current_origin)
            else:
                token = self.tok.get()
                if token.is_eol_or_eof():
                    # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
                    return
                self.tok.unget(token)
            # Leading whitespace means "same owner as the previous record".
            name = self.last_name
        if not name.is_subdomain(self.zone_origin):
            # Out-of-zone data is silently skipped, not an error.
            self._eat_line()
            return
        if self.relativize:
            name = name.relativize(self.zone_origin)

        # TTL
        if self.force_ttl is not None:
            ttl = self.force_ttl
            self.last_ttl = ttl
            self.last_ttl_known = True
        else:
            token = self._get_identifier()
            ttl = None
            try:
                ttl = dns.ttl.from_text(token.value)
                self.last_ttl = ttl
                self.last_ttl_known = True
                token = None
            except dns.ttl.BadTTL:
                # Not a TTL; push it back so it can be read as class/type.
                self.tok.unget(token)

        # Class
        if self.force_rdclass is not None:
            rdclass = self.force_rdclass
        else:
            token = self._get_identifier()
            try:
                rdclass = dns.rdataclass.from_text(token.value)
            except dns.exception.SyntaxError:
                raise
            except Exception:
                # No explicit class; fall back to the zone's class and
                # re-read the token as the type.
                rdclass = self.zone_rdclass
                self.tok.unget(token)
            if rdclass != self.zone_rdclass:
                raise dns.exception.SyntaxError("RR class is not zone's class")

        if ttl is None:
            # support for "<class> <ttl>" ordering (TTL after the class)
            token = self._get_identifier()
            ttl = None
            try:
                ttl = dns.ttl.from_text(token.value)
                self.last_ttl = ttl
                self.last_ttl_known = True
                token = None
            except dns.ttl.BadTTL:
                if self.default_ttl_known:
                    ttl = self.default_ttl
                elif self.last_ttl_known:
                    ttl = self.last_ttl
                self.tok.unget(token)

        # Type
        if self.force_rdtype is not None:
            rdtype = self.force_rdtype
        else:
            token = self._get_identifier()
            try:
                rdtype = dns.rdatatype.from_text(token.value)
            except Exception:
                raise dns.exception.SyntaxError(f"unknown rdatatype '{token.value}'")

        try:
            rd = dns.rdata.from_text(
                rdclass,
                rdtype,
                self.tok,
                self.current_origin,
                self.relativize,
                self.zone_origin,
            )
        except dns.exception.SyntaxError:
            # Catch and reraise.
            raise
        except Exception:
            # All exceptions that occur in the processing of rdata
            # are treated as syntax errors.  This is not strictly
            # correct, but it is correct almost all of the time.
            # We convert them to syntax errors so that we can emit
            # helpful filename:line info.
            (ty, va) = sys.exc_info()[:2]
            raise dns.exception.SyntaxError(f"caught exception {str(ty)}: {str(va)}")

        if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
            # The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
            # TTL from the SOA minttl if no $TTL statement is present before the
            # SOA is parsed.
            self.default_ttl = rd.minimum
            self.default_ttl_known = True
            if ttl is None:
                # if we didn't have a TTL on the SOA, set it!
                ttl = rd.minimum

        # TTL check.  We had to wait until now to do this as the SOA RR's
        # own TTL can be inferred from its minimum.
        if ttl is None:
            raise dns.exception.SyntaxError("Missing default TTL value")

        self.txn.add(name, ttl, rd)

    def _parse_modify(self, side: str) -> Tuple[str, str, int, int, str]:
        """Parse a $GENERATE substitution modifier out of *side* (the lhs or
        rhs template).  Returns (mod, sign, offset, width, base) where *mod*
        is the literal "{...}" text to strip from the template.
        """
        # Here we catch everything in '{' '}' in a group so we can replace it
        # with ''.
        is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
        is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
        is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
        # Sometimes there are modifiers in the hostname. These come after
        # the dollar sign. They are in the form: ${offset[,width[,base]]}.
        # Make names
        mod = ""
        sign = "+"
        offset = "0"
        width = "0"
        base = "d"
        g1 = is_generate1.match(side)
        if g1:
            mod, sign, offset, width, base = g1.groups()
            if sign == "":
                sign = "+"
        else:
            g2 = is_generate2.match(side)
            if g2:
                mod, sign, offset = g2.groups()
                if sign == "":
                    sign = "+"
                width = "0"
                base = "d"
            else:
                g3 = is_generate3.match(side)
                if g3:
                    mod, sign, offset, width = g3.groups()
                    if sign == "":
                        sign = "+"
                    base = "d"

        ioffset = int(offset)
        iwidth = int(width)

        if sign not in ["+", "-"]:
            raise dns.exception.SyntaxError(f"invalid offset sign {sign}")
        if base not in ["d", "o", "x", "X", "n", "N"]:
            raise dns.exception.SyntaxError(f"invalid type {base}")

        return mod, sign, ioffset, iwidth, base

    def _generate_line(self):
        # range lhs [ttl] [class] type rhs [ comment ]
        """Process one line containing the GENERATE statement from a DNS
        zone file."""
        if self.current_origin is None:
            raise UnknownOrigin

        token = self.tok.get()
        # Range (required)
        try:
            start, stop, step = dns.grange.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except Exception:
            raise dns.exception.SyntaxError

        # lhs (required)
        try:
            lhs = token.value
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except Exception:
            raise dns.exception.SyntaxError

        # TTL
        try:
            ttl = dns.ttl.from_text(token.value)
            self.last_ttl = ttl
            self.last_ttl_known = True
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            # No TTL on this line; use the default or the last-seen TTL.
            if not (self.last_ttl_known or self.default_ttl_known):
                raise dns.exception.SyntaxError("Missing default TTL value")
            if self.default_ttl_known:
                ttl = self.default_ttl
            elif self.last_ttl_known:
                ttl = self.last_ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except Exception:
            # No explicit class; the current token is the type.
            rdclass = self.zone_rdclass
        if rdclass != self.zone_rdclass:
            raise dns.exception.SyntaxError("RR class is not zone's class")
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token.value)
            token = self.tok.get()
            if not token.is_identifier():
                raise dns.exception.SyntaxError
        except Exception:
            raise dns.exception.SyntaxError(f"unknown rdatatype '{token.value}'")

        # rhs (required)
        rhs = token.value

        def _calculate_index(counter: int, offset_sign: str, offset: int) -> int:
            """Calculate the index from the counter and offset."""
            if offset_sign == "-":
                offset *= -1
            return counter + offset

        def _format_index(index: int, base: str, width: int) -> str:
            """Format the index with the given base, and zero-fill it
            to the given width."""
            if base in ["d", "o", "x", "X"]:
                return format(index, base).zfill(width)

            # base can only be n or N here: reversed, dot-separated nibbles
            # (as used for reverse-DNS labels).
            hexa = _format_index(index, "x", width)
            nibbles = ".".join(hexa[::-1])[:width]
            if base == "N":
                nibbles = nibbles.upper()
            return nibbles

        lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
        rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
        for i in range(start, stop + 1, step):
            # +1 because bind is inclusive and python is exclusive

            lindex = _calculate_index(i, lsign, loffset)
            rindex = _calculate_index(i, rsign, roffset)

            lzfindex = _format_index(lindex, lbase, lwidth)
            rzfindex = _format_index(rindex, rbase, rwidth)

            name = lhs.replace(f"${lmod}", lzfindex)
            rdata = rhs.replace(f"${rmod}", rzfindex)

            self.last_name = dns.name.from_text(
                name, self.current_origin, self.tok.idna_codec
            )
            name = self.last_name
            if not name.is_subdomain(self.zone_origin):
                self._eat_line()
                return
            if self.relativize:
                name = name.relativize(self.zone_origin)

            try:
                rd = dns.rdata.from_text(
                    rdclass,
                    rdtype,
                    rdata,
                    self.current_origin,
                    self.relativize,
                    self.zone_origin,
                )
            except dns.exception.SyntaxError:
                # Catch and reraise.
                raise
            except Exception:
                # All exceptions that occur in the processing of rdata
                # are treated as syntax errors.  This is not strictly
                # correct, but it is correct almost all of the time.
                # We convert them to syntax errors so that we can emit
                # helpful filename:line info.
                (ty, va) = sys.exc_info()[:2]
                raise dns.exception.SyntaxError(
                    f"caught exception {str(ty)}: {str(va)}"
                )

            self.txn.add(name, ttl, rd)

    def read(self) -> None:
        """Read a DNS zone file and build a zone object.

        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
        """

        try:
            while 1:
                token = self.tok.get(True, True)
                if token.is_eof():
                    if self.current_file is not None:
                        self.current_file.close()
                    if len(self.saved_state) > 0:
                        # EOF of a $INCLUDEd file: restore the outer file's
                        # parser state and keep going.
                        (
                            self.tok,
                            self.current_origin,
                            self.last_name,
                            self.current_file,
                            self.last_ttl,
                            self.last_ttl_known,
                            self.default_ttl,
                            self.default_ttl_known,
                        ) = self.saved_state.pop(-1)
                        continue
                    break
                elif token.is_eol():
                    continue
                elif token.is_comment():
                    self.tok.get_eol()
                    continue
                elif token.value[0] == "$" and len(self.allowed_directives) > 0:
                    # Note that we only run directive processing code if at least
                    # one directive is allowed in order to be backwards compatible
                    c = token.value.upper()
                    if c not in self.allowed_directives:
                        raise dns.exception.SyntaxError(
                            f"zone file directive '{c}' is not allowed"
                        )
                    if c == "$TTL":
                        token = self.tok.get()
                        if not token.is_identifier():
                            raise dns.exception.SyntaxError("bad $TTL")
                        self.default_ttl = dns.ttl.from_text(token.value)
                        self.default_ttl_known = True
                        self.tok.get_eol()
                    elif c == "$ORIGIN":
                        self.current_origin = self.tok.get_name()
                        self.tok.get_eol()
                        if self.zone_origin is None:
                            self.zone_origin = self.current_origin
                        self.txn._set_origin(self.current_origin)
                    elif c == "$INCLUDE":
                        token = self.tok.get()
                        filename = token.value
                        token = self.tok.get()
                        new_origin: Optional[dns.name.Name]
                        if token.is_identifier():
                            new_origin = dns.name.from_text(
                                token.value, self.current_origin, self.tok.idna_codec
                            )
                            self.tok.get_eol()
                        elif not token.is_eol_or_eof():
                            raise dns.exception.SyntaxError("bad origin in $INCLUDE")
                        else:
                            new_origin = self.current_origin
                        self.saved_state.append(
                            (
                                self.tok,
                                self.current_origin,
                                self.last_name,
                                self.current_file,
                                self.last_ttl,
                                self.last_ttl_known,
                                self.default_ttl,
                                self.default_ttl_known,
                            )
                        )
                        self.current_file = open(filename)
                        self.tok = dns.tokenizer.Tokenizer(self.current_file, filename)
                        self.current_origin = new_origin
                    elif c == "$GENERATE":
                        self._generate_line()
                    else:
                        raise dns.exception.SyntaxError(
                            f"Unknown zone file directive '{c}'"
                        )
                    continue
                self.tok.unget(token)
                self._rr_line()
        except dns.exception.SyntaxError as detail:
            # Re-raise with "filename:line:" context prepended.
            (filename, line_number) = self.tok.where()
            if detail is None:
                detail = "syntax error"
            ex = dns.exception.SyntaxError(
                "%s:%d: %s" % (filename, line_number, detail)
            )
            tb = sys.exc_info()[2]
            raise ex.with_traceback(tb) from None


class RRsetsReaderTransaction(dns.transaction.Transaction):
    """Write-only transaction that accumulates rdatasets in a dict keyed by
    (name, rdtype, covers), for conversion to rrsets on commit."""

    def __init__(self, manager, replacement, read_only):
        assert not read_only
        super().__init__(manager, replacement, read_only)
        self.rdatasets = {}

    def _get_rdataset(self, name, rdtype, covers):
        return self.rdatasets.get((name, rdtype, covers))

    def _get_node(self, name):
        # Synthesize a node from all rdatasets stored under *name*.
        rdatasets = []
        for (rdataset_name, _, _), rdataset in self.rdatasets.items():
            if name == rdataset_name:
                rdatasets.append(rdataset)
        if len(rdatasets) == 0:
            return None
        node = dns.node.Node()
        node.rdatasets = rdatasets
        return node

    def _put_rdataset(self, name, rdataset):
        self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset

    def _delete_name(self, name):
        # First remove any changes involving the name
        remove = []
        for key in self.rdatasets:
            if key[0] == name:
                remove.append(key)
        if len(remove) > 0:
            for key in remove:
                del self.rdatasets[key]

    def _delete_rdataset(self, name, rdtype, covers):
        try:
            del self.rdatasets[(name, rdtype, covers)]
        except KeyError:
            pass

    def _name_exists(self, name):
        for n, _, _ in self.rdatasets:
            if n == name:
                return True
        return False

    def _changed(self):
        return len(self.rdatasets) > 0

    def _end_transaction(self, commit):
        # On commit, convert the accumulated rdatasets into RRsets and hand
        # them to the manager.
        if commit and self._changed():
            rrsets = []
            for (name, _, _), rdataset in self.rdatasets.items():
                rrset = dns.rrset.RRset(
                    name, rdataset.rdclass, rdataset.rdtype, rdataset.covers
                )
                rrset.update(rdataset)
                rrsets.append(rrset)
            self.manager.set_rrsets(rrsets)

    def _set_origin(self, origin):
        pass

    def _iterate_rdatasets(self):
        raise NotImplementedError  # pragma: no cover

    def _iterate_names(self):
        raise NotImplementedError  # pragma: no cover


class RRSetsReaderManager(dns.transaction.TransactionManager):
    """Minimal transaction manager used by read_rrsets(); collects the
    committed rrsets in self.rrsets."""

    def __init__(
        self, origin=dns.name.root, relativize=False, rdclass=dns.rdataclass.IN
    ):
        self.origin = origin
        self.relativize = relativize
        self.rdclass = rdclass
        self.rrsets = []

    def reader(self):  # pragma: no cover
        raise NotImplementedError

    def writer(self, replacement=False):
        assert replacement is True
        return RRsetsReaderTransaction(self, True, False)

    def get_class(self):
        return self.rdclass

    def origin_information(self):
        if self.relativize:
            effective = dns.name.empty
        else:
            effective = self.origin
        return (self.origin, self.relativize, effective)

    def set_rrsets(self, rrsets):
        self.rrsets = rrsets


def read_rrsets(
    text: Any,
    name: Optional[Union[dns.name.Name, str]] = None,
    ttl: Optional[int] = None,
    rdclass: Optional[Union[dns.rdataclass.RdataClass, str]] = dns.rdataclass.IN,
    default_rdclass: Union[dns.rdataclass.RdataClass, str] = dns.rdataclass.IN,
    rdtype: Optional[Union[dns.rdatatype.RdataType, str]] = None,
    default_ttl: Optional[Union[int, str]] = None,
    idna_codec: Optional[dns.name.IDNACodec] = None,
    origin: Optional[Union[dns.name.Name, str]] = dns.name.root,
    relativize: bool = False,
) -> List[dns.rrset.RRset]:
    """Read one or more rrsets from the specified text, possibly subject
    to restrictions.

    *text*, a file object or a string, is the input to process.

    *name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
    the rrset.  If not ``None``, then the owner name is "forced", and the
    input must not specify an owner name.  If ``None``, then any owner names
    are allowed and must be present in the input.

    *ttl*, an ``int``, string, or None.  If not ``None``, the TTL is
    forced to be the specified value and the input must not specify a TTL.
    If ``None``, then a TTL may be specified in the input.  If it is not
    specified, then the *default_ttl* will be used.

    *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``.  If
    not ``None``, then the class is forced to the specified value, and the
    input must not specify a class.  If ``None``, then the input may specify
    a class that matches *default_rdclass*.  Note that it is not possible to
    return rrsets with differing classes; specifying ``None`` for the class
    simply allows the user to optionally type a class as that may be convenient
    when cutting and pasting.

    *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string.  The class
    of the returned rrsets.

    *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``.  If not
    ``None``, then the type is forced to the specified value, and the
    input must not specify a type.  If ``None``, then a type must be present
    for each RR.

    *default_ttl*, an ``int``, string, or ``None``.  If not ``None``, then if
    the TTL is not forced and is not specified, then this value will be used.
    If ``None``, then if the TTL is not forced an error will occur if the TTL
    is not specified.

    *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
    encoder/decoder.  If ``None``, the default IDNA 2003 encoder/decoder
    is used.  Note that codecs only apply to the owner name; dnspython does
    not do IDNA for names in rdata, as there is no IDNA zonefile format.

    *origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
    relative names in the input, and also the origin to relativize to if
    *relativize* is ``True``.

    *relativize*, a bool.  If ``True``, names are relativized to the *origin*;
    if ``False`` then any relative names in the input are made absolute by
    appending the *origin*.
    """
    # Normalize string arguments to their dnspython object forms.
    if isinstance(origin, str):
        origin = dns.name.from_text(origin, dns.name.root, idna_codec)
    if isinstance(name, str):
        name = dns.name.from_text(name, origin, idna_codec)
    if isinstance(ttl, str):
        ttl = dns.ttl.from_text(ttl)
    if isinstance(default_ttl, str):
        default_ttl = dns.ttl.from_text(default_ttl)
    if rdclass is not None:
        rdclass = dns.rdataclass.RdataClass.make(rdclass)
    else:
        rdclass = None
    default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
    if rdtype is not None:
        rdtype = dns.rdatatype.RdataType.make(rdtype)
    else:
        rdtype = None
    manager = RRSetsReaderManager(origin, relativize, default_rdclass)
    with manager.writer(True) as txn:
        tok = dns.tokenizer.Tokenizer(text, "", idna_codec=idna_codec)
        reader = Reader(
            tok,
            default_rdclass,
            txn,
            allow_directives=False,
            force_name=name,
            force_ttl=ttl,
            force_rdclass=rdclass,
            force_rdtype=rdtype,
            default_ttl=default_ttl,
        )
        reader.read()
    return manager.rrsets
b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3228a9820de59d04bcba65898aec42fc3fa59d12 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9870b938285c0364ad0c5589a210c80af1b543c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_definitions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01f3c5c33a250498826405eb2426db1c5181ad33 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/__pycache__/_utils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5919de3bbf4cdb56705f83e3cfa63e3c332925dd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__init__.py @@ -0,0 +1 @@ +# Just here to make importlib.resources work diff --git a/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc38cf8f69ba1e751ad7780d923e5b3447d063dd Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/imageio_ffmpeg/binaries/__pycache__/__init__.cpython-310.pyc differ