Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h +46 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h +36 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h +65 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h +13 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h +339 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h +164 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h +57 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h +37 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h +11 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h +15 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h +17 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h +21 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_numbers.h +186 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_raii.h +86 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_strings.h +129 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_stub.h +4 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_torch_function_mode.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/schema_info.h +117 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/structseq.h +11 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_list.h +15 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_numpy.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_types.h +19 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark-inl.h +151 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/torch_dispatch_mode.h +58 -0
- vllm/lib/python3.10/site-packages/dns/_features.py +95 -0
- vllm/lib/python3.10/site-packages/dns/_trio_backend.py +253 -0
- vllm/lib/python3.10/site-packages/dns/asyncbackend.py +101 -0
- vllm/lib/python3.10/site-packages/dns/asyncquery.py +913 -0
- vllm/lib/python3.10/site-packages/dns/enum.py +116 -0
- vllm/lib/python3.10/site-packages/dns/exception.py +169 -0
- vllm/lib/python3.10/site-packages/dns/name.py +1284 -0
- vllm/lib/python3.10/site-packages/dns/tokenizer.py +708 -0
- vllm/lib/python3.10/site-packages/dns/ttl.py +92 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/_exceptions.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__init__.py +39 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/httpcore/_async/connection.py +222 -0
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// All RPC peers should call into this function at the same time. Each peer
|
| 11 |
+
// provides its own id and name, and this function uses the given Store to
|
| 12 |
+
// gather global name-to-id mapping on all peers.
|
| 13 |
+
TORCH_API std::unordered_map<std::string, worker_id_t> collectNames(
|
| 14 |
+
::c10d::PrefixStore store,
|
| 15 |
+
const worker_id_t selfId,
|
| 16 |
+
const std::string& selfName,
|
| 17 |
+
const int worldSize);
|
| 18 |
+
|
| 19 |
+
// Ranks in dynamic RPC groups will initially call into this to establish the
|
| 20 |
+
// name-to-id mapping for the current peers in the group. The current rank will
|
| 21 |
+
// put its own worker info in the store and discover all the ranks that came
|
| 22 |
+
// before it. NOTE: This needs to be called with the Dynamic RPC group
|
| 23 |
+
// membership management token held.
|
| 24 |
+
TORCH_API std::unordered_map<std::string, worker_id_t> collectCurrentNames(
|
| 25 |
+
::c10d::PrefixStore store,
|
| 26 |
+
const worker_id_t selfId,
|
| 27 |
+
const std::string& selfName);
|
| 28 |
+
|
| 29 |
+
// Remove name frmo Store, used in dynamic RPC groups.
|
| 30 |
+
// NOTE: This needs to be called with the Dynamic RPC group
|
| 31 |
+
// membership management token held.
|
| 32 |
+
TORCH_API void removeCurrentName(
|
| 33 |
+
::c10d::PrefixStore store,
|
| 34 |
+
const worker_id_t selfId,
|
| 35 |
+
const std::string& selfName);
|
| 36 |
+
|
| 37 |
+
// This performs a synchronization of all call counts by using store.
|
| 38 |
+
// All RPC peers wait for others to join to exit at the same time.
|
| 39 |
+
TORCH_API int syncCallCount(
|
| 40 |
+
::c10d::PrefixStore store,
|
| 41 |
+
const int worldSize,
|
| 42 |
+
int activeCalls = 0);
|
| 43 |
+
|
| 44 |
+
} // namespace rpc
|
| 45 |
+
} // namespace distributed
|
| 46 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// RPC call representing the response of a Python UDF over RPC.
|
| 11 |
+
class TORCH_API PythonResp final : public RpcCommandBase {
|
| 12 |
+
public:
|
| 13 |
+
explicit PythonResp(SerializedPyObj&& serializedPyObj);
|
| 14 |
+
|
| 15 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 16 |
+
|
| 17 |
+
static std::unique_ptr<PythonResp> fromMessage(const Message& message);
|
| 18 |
+
|
| 19 |
+
const SerializedPyObj& serializedPyObj() const;
|
| 20 |
+
|
| 21 |
+
private:
|
| 22 |
+
SerializedPyObj serializedPyObj_;
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
} // namespace rpc
|
| 26 |
+
} // namespace distributed
|
| 27 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace distributed {
|
| 7 |
+
namespace rpc {
|
| 8 |
+
|
| 9 |
+
// Functor which is invoked to process an RPC message. This is an abstract class
|
| 10 |
+
// with some common functionality across all request handlers. Users need to
|
| 11 |
+
// implement this interface to perform the actual business logic.
|
| 12 |
+
class TORCH_API RequestCallback {
|
| 13 |
+
public:
|
| 14 |
+
// Invoke the callback.
|
| 15 |
+
c10::intrusive_ptr<JitFuture> operator()(
|
| 16 |
+
Message& request,
|
| 17 |
+
std::vector<c10::Stream> streams) const;
|
| 18 |
+
|
| 19 |
+
virtual ~RequestCallback() = default;
|
| 20 |
+
|
| 21 |
+
protected:
|
| 22 |
+
// RpcAgent implementation should invoke ``RequestCallback`` to process
|
| 23 |
+
// received requests. There is no restriction on the implementation's
|
| 24 |
+
// threading model. This function takes an rvalue reference of the Message
|
| 25 |
+
// object. It is expected to return the future to a response message or
|
| 26 |
+
// message containing an exception. Different rpc agent implementations are
|
| 27 |
+
// expected to ensure delivery of the response/exception based on their
|
| 28 |
+
// implementation specific mechanisms.
|
| 29 |
+
virtual c10::intrusive_ptr<JitFuture> processMessage(
|
| 30 |
+
Message& request,
|
| 31 |
+
std::vector<c10::Stream> streams) const = 0;
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
} // namespace rpc
|
| 35 |
+
} // namespace distributed
|
| 36 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/request_callback_no_python.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 6 |
+
#include <torch/csrc/jit/python/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace distributed {
|
| 10 |
+
namespace rpc {
|
| 11 |
+
|
| 12 |
+
class TORCH_API RequestCallbackImpl : public RequestCallbackNoPython {
|
| 13 |
+
public:
|
| 14 |
+
std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
|
| 15 |
+
std::unique_ptr<RpcCommandBase> rpc,
|
| 16 |
+
const MessageType& messageType) const override;
|
| 17 |
+
|
| 18 |
+
c10::intrusive_ptr<JitFuture> processPythonCall(
|
| 19 |
+
RpcCommandBase& rpc,
|
| 20 |
+
std::vector<c10::Stream> streams) const override;
|
| 21 |
+
|
| 22 |
+
c10::intrusive_ptr<JitFuture> processScriptCall(
|
| 23 |
+
RpcCommandBase& rpc,
|
| 24 |
+
std::vector<c10::Stream> streams) const override;
|
| 25 |
+
|
| 26 |
+
c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
|
| 27 |
+
RpcCommandBase& rpc,
|
| 28 |
+
std::vector<c10::Stream> streams) const override;
|
| 29 |
+
|
| 30 |
+
c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
|
| 31 |
+
RpcCommandBase& rpc,
|
| 32 |
+
std::vector<c10::Stream> streams) const override;
|
| 33 |
+
|
| 34 |
+
c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
|
| 35 |
+
RpcCommandBase& rpc) const override;
|
| 36 |
+
|
| 37 |
+
void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const override;
|
| 38 |
+
|
| 39 |
+
c10::intrusive_ptr<JitFuture> processRpcWithErrors(
|
| 40 |
+
RpcCommandBase& rpc,
|
| 41 |
+
const MessageType& messageType,
|
| 42 |
+
std::vector<c10::Stream> streams) const override;
|
| 43 |
+
|
| 44 |
+
bool cudaAvailable() const override;
|
| 45 |
+
|
| 46 |
+
c10::intrusive_ptr<JitFuture> processRRefBackward(
|
| 47 |
+
RpcCommandBase& rpc) const override;
|
| 48 |
+
|
| 49 |
+
// Helpers to run user-defined functions, operators and other computations.
|
| 50 |
+
|
| 51 |
+
c10::intrusive_ptr<JitFuture> runJitFunction(
|
| 52 |
+
const c10::QualifiedName& name,
|
| 53 |
+
std::vector<at::IValue>& stack,
|
| 54 |
+
std::vector<c10::Stream> streams,
|
| 55 |
+
bool isAsyncExecution) const;
|
| 56 |
+
|
| 57 |
+
c10::intrusive_ptr<JitFuture> runPythonFunction(
|
| 58 |
+
const py::object& function,
|
| 59 |
+
std::vector<c10::Stream> streams,
|
| 60 |
+
bool isAsyncExecution) const;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
} // namespace rpc
|
| 64 |
+
} // namespace distributed
|
| 65 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace distributed {
|
| 7 |
+
namespace rpc {
|
| 8 |
+
|
| 9 |
+
PyMethodDef* python_functions();
|
| 10 |
+
|
| 11 |
+
} // namespace rpc
|
| 12 |
+
} // namespace distributed
|
| 13 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// Base class for all RPC request and responses.
|
| 11 |
+
class RpcCommandBase {
|
| 12 |
+
public:
|
| 13 |
+
// Need to override this to serialize the RPC. This should destructively
|
| 14 |
+
// create a message for the RPC (Hence the &&).
|
| 15 |
+
c10::intrusive_ptr<Message> toMessage() && {
|
| 16 |
+
JitRRefPickleGuard jitPickleGuard;
|
| 17 |
+
return std::move(*this).toMessageImpl();
|
| 18 |
+
}
|
| 19 |
+
virtual c10::intrusive_ptr<Message> toMessageImpl() && = 0;
|
| 20 |
+
virtual ~RpcCommandBase() = 0;
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
inline RpcCommandBase::~RpcCommandBase() = default;
|
| 24 |
+
|
| 25 |
+
} // namespace rpc
|
| 26 |
+
} // namespace distributed
|
| 27 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h
ADDED
|
@@ -0,0 +1,339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/Optional.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 8 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 9 |
+
|
| 10 |
+
#include <atomic>
|
| 11 |
+
|
| 12 |
+
namespace torch {
|
| 13 |
+
namespace distributed {
|
| 14 |
+
namespace rpc {
|
| 15 |
+
|
| 16 |
+
namespace callback {
|
| 17 |
+
// It's the callback for RemoteCall.
|
| 18 |
+
void TORCH_API
|
| 19 |
+
confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId);
|
| 20 |
+
|
| 21 |
+
// It's the callback for finishing creating owner rref, it returned deletedRRef,
|
| 22 |
+
// so that the deletedRRef can be handled under GIL in python_functions.cpp if
|
| 23 |
+
// deletedRRef contains python object.
|
| 24 |
+
c10::intrusive_ptr<RRef> TORCH_API
|
| 25 |
+
finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId);
|
| 26 |
+
} // namespace callback
|
| 27 |
+
|
| 28 |
+
// Manages RRef lifetime and keeps track of RRef forks.
|
| 29 |
+
class TORCH_API RRefContext {
|
| 30 |
+
public:
|
| 31 |
+
static RRefContext& getInstance();
|
| 32 |
+
// NB: This method must be called before destructing RRefContext singleton.
|
| 33 |
+
// Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that
|
| 34 |
+
// hold py::object. The call-site is also responsible for resetting those
|
| 35 |
+
// shared_ptr objects with a GIL. See comments at delForkOfOwner() for more
|
| 36 |
+
// details.
|
| 37 |
+
static std::vector<c10::intrusive_ptr<RRef>> destroyInstance(
|
| 38 |
+
bool ignoreRRefLeak = true);
|
| 39 |
+
|
| 40 |
+
static void handleException(const JitFuture& jitFuture);
|
| 41 |
+
|
| 42 |
+
// handle exception without throw ::c10::Error again
|
| 43 |
+
static void handleExceptionSilent(const JitFuture& jitFuture);
|
| 44 |
+
|
| 45 |
+
RRefContext(const RRefContext&) = delete;
|
| 46 |
+
RRefContext(RRefContext&& other) = delete;
|
| 47 |
+
void operator=(const RRefContext&) = delete;
|
| 48 |
+
RRefContext& operator=(RRefContext&& other) = delete;
|
| 49 |
+
|
| 50 |
+
~RRefContext();
|
| 51 |
+
|
| 52 |
+
// get the worker id of the current worker
|
| 53 |
+
inline worker_id_t getWorkerId() const {
|
| 54 |
+
return agent_->getWorkerInfo().id_;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
// get the worker name of the current worker
|
| 58 |
+
inline const std::string& getWorkerName() const {
|
| 59 |
+
return agent_->getWorkerInfo().name_;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
// generate a globally unique ID
|
| 63 |
+
inline GloballyUniqueId genGloballyUniqueId() {
|
| 64 |
+
return GloballyUniqueId(getWorkerId(), nextLocalId_++);
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
inline const std::shared_ptr<RpcAgent>& agent() const {
|
| 68 |
+
return agent_;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
// create a ``UserRRef`` owned by the worker ``ownerId``
|
| 72 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 73 |
+
worker_id_t ownerId,
|
| 74 |
+
const TypePtr& type);
|
| 75 |
+
|
| 76 |
+
// Convert an RRefForkData into an RRef. This RRef could be user or owner.
|
| 77 |
+
// This RRef could have already existed before, or could be created in this
|
| 78 |
+
// method, we pass type here to validate or help the rref creation.
|
| 79 |
+
c10::intrusive_ptr<RRef> getOrCreateRRef(
|
| 80 |
+
const RRefForkData& rfd,
|
| 81 |
+
const TypePtr& type);
|
| 82 |
+
|
| 83 |
+
// Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new
|
| 84 |
+
// one. This function is called in two places:
|
| 85 |
+
// 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL``
|
| 86 |
+
// ``PYTHON_REMOTE_CALL``.
|
| 87 |
+
// 2. when unpickling ``OwnerRRef``.
|
| 88 |
+
// What's common in these two cases are, 1) the RRefId is already generated
|
| 89 |
+
// 2) the TypePtr is presented. So it can always create the ``OwnerRRef`` if
|
| 90 |
+
// it is not yet available.
|
| 91 |
+
c10::intrusive_ptr<OwnerRRef> getOrCreateOwnerRRef(
|
| 92 |
+
const RRefId& rrefId,
|
| 93 |
+
const TypePtr& type);
|
| 94 |
+
|
| 95 |
+
// Create an empty owner rref of type.
|
| 96 |
+
// This method is called to first time generate an ``OwnerRRef``, e.g.,
|
| 97 |
+
// 1) ``rpc.RRef(obj)``
|
| 98 |
+
// 2) create the ``OwnerRRef`` on `rpc.remote()` caller side.
|
| 99 |
+
// What's common in these two cases are, 1) the RRefId hasn't been generated
|
| 100 |
+
// 2) the TypePtr is presented.
|
| 101 |
+
c10::intrusive_ptr<OwnerRRef> createOwnerRRef(const TypePtr& type);
|
| 102 |
+
|
| 103 |
+
// Returns a Future of the OwnerRRef, which will be marked completed when
|
| 104 |
+
// ``OwnerRRef`` is created. This method is used when the TypePtr is not
|
| 105 |
+
// available, e.g., when processing to_here(). The forceCreated flag can be
|
| 106 |
+
// used to ensure that the rref is created on the owner, otherwise throw in
|
| 107 |
+
// cases where the user of this API expects this to return a completed future.
|
| 108 |
+
// Note that the return value is a intrusive_ptr to a c10::ivalue::Future that
|
| 109 |
+
// holds the RRef.
|
| 110 |
+
c10::intrusive_ptr<JitFuture> getOwnerRRef(
|
| 111 |
+
const RRefId& rrefId,
|
| 112 |
+
bool forceCreated = false);
|
| 113 |
+
|
| 114 |
+
// Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when
|
| 115 |
+
// making a remote call to self, which as for now, still goes through serde
|
| 116 |
+
// and invokes request callback. In this case, the OwnerRRef has already been
|
| 117 |
+
// created on the send side, and we need to pass it to the receive side,
|
| 118 |
+
// instead of creating a new OwnerRRef. This is done by adding the OwnerRRef
|
| 119 |
+
// into owners_. However, that alone is not enough, as it could be deleted
|
| 120 |
+
// when all UserRRef die, which would then remove the OwnerRRef from owners_
|
| 121 |
+
// and this could happen before the self remote call finishes. To prevent
|
| 122 |
+
// that, this API adds the RRefId as a ForkId, which will then delete the
|
| 123 |
+
// ForkId when the self remote is done.
|
| 124 |
+
void addSelfAsFork(c10::intrusive_ptr<OwnerRRef>& rref);
|
| 125 |
+
|
| 126 |
+
// Register a fork of the ``OwnerRRef``, and inserts a intrusive_ptr of the
|
| 127 |
+
// ``OwnerRRef`` in a map to keep it alive.
|
| 128 |
+
void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId);
|
| 129 |
+
// Performs the same function as addForkOfOwner but ignores duplicate
|
| 130 |
+
// requests. This idempotent function is used with RREF_FORK_REQUEST calls,
|
| 131 |
+
// whereas all other message types use the non-idempotent variant.
|
| 132 |
+
void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId);
|
| 133 |
+
// Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion on the
|
| 134 |
+
// IValue or py::object. For the later, this method will acquire GIL.
|
| 135 |
+
// NB: If this fork deletion triggered deleting OwnerRRef, this method will
|
| 136 |
+
// return a shared_ptr to the OwnerRRef, which is likely to be the last
|
| 137 |
+
// shared_ptr instance for it. Therefore, deleting this shared_ptr<OwnerRRef>
|
| 138 |
+
// will also trigger deleting the object it points to. If OwnerRRef holds a
|
| 139 |
+
// py::object, deleting it require GIL. The call site should guarded it with
|
| 140 |
+
// a GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally
|
| 141 |
+
// left out of this function to avoid creating dependency on pybind.
|
| 142 |
+
c10::intrusive_ptr<RRef> delForkOfOwner(
|
| 143 |
+
const RRefId& rrefId,
|
| 144 |
+
const ForkId& forkId);
|
| 145 |
+
|
| 146 |
+
// Invoked when pickling an RRef to setup child/fork properly
|
| 147 |
+
RRefForkData prepareChildFork(const c10::intrusive_ptr<RRef>& rref);
|
| 148 |
+
// Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and
|
| 149 |
+
// send RREF_CHILD_ACCEPT to the parent.
|
| 150 |
+
// NB: forkId is necessary here as the rref could be an OwnerRRef
|
| 151 |
+
void notifyOwnerAndParentOfFork(
|
| 152 |
+
const ForkId& forkId,
|
| 153 |
+
worker_id_t parent,
|
| 154 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 155 |
+
|
| 156 |
+
// When a UserRRef is forked to another worker (user or owner), it is added
|
| 157 |
+
// into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT
|
| 158 |
+
// from the child.
|
| 159 |
+
// NB: This is necessary for both user and owner child. As we do not have FIFO
|
| 160 |
+
// communication between workers, we need this strategy to make sure that all
|
| 161 |
+
// previously submitted rpc/remote calls are acked before sending out the
|
| 162 |
+
// RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too
|
| 163 |
+
// soon.
|
| 164 |
+
void addPendingChild(
|
| 165 |
+
const ForkId& forkId,
|
| 166 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 167 |
+
void delPendingChild(const ForkId& forkId);
|
| 168 |
+
|
| 169 |
+
// When a UserRRef is created, it is added into pendingUsers_ to be held alive
|
| 170 |
+
// until it receives RREF_USER_ACCEPT from the owner.
|
| 171 |
+
void addPendingUser(
|
| 172 |
+
const ForkId& forkId,
|
| 173 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 174 |
+
void delPendingUser(const ForkId& forkId);
|
| 175 |
+
void addConfirmedUser(
|
| 176 |
+
const ForkId& forkId,
|
| 177 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 178 |
+
|
| 179 |
+
// Retrieve a pending user given the fork ID. Throws if the user has already
|
| 180 |
+
// been confirmed (i.e. is no longer in the pendingUsers_ map).
|
| 181 |
+
c10::intrusive_ptr<RRef> getPendingUser(const ForkId& forkId);
|
| 182 |
+
|
| 183 |
+
// Start recording new pending UserRRefs. All pending UserRRefs introduced
|
| 184 |
+
// after this point will be put into the thread_local userTable_, which will
|
| 185 |
+
// then be consumed and cleared in waitForThreadLocalPendingRRefs().
|
| 186 |
+
void recordThreadLocalPendingRRefs();
|
| 187 |
+
// End recording new pending UserRRefs, and clear the thread_local userTable_.
|
| 188 |
+
// Returns a Future which will be marked as completed when all pending
|
| 189 |
+
// UserRRefs in the current userTable_ are confirmed by their owners. The bool
|
| 190 |
+
// value in the Future is unused.
|
| 191 |
+
// This method is useful to make sure RRefs in user function arguments are
|
| 192 |
+
// confirmed before launching user code.
|
| 193 |
+
// NB: Callers of this method does not need to keep the returned Future alive,
|
| 194 |
+
// because this Future is already captured in callbacks of the
|
| 195 |
+
// PendingUserState. If there is no pending UserRRefs, this method returns a
|
| 196 |
+
// completed future.
|
| 197 |
+
c10::intrusive_ptr<JitFuture> waitForThreadLocalPendingRRefs();
|
| 198 |
+
// Only call this function when there are errors during a recording session,
|
| 199 |
+
// and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked
|
| 200 |
+
// properly.
|
| 201 |
+
// TODO: make this a context guard
|
| 202 |
+
void clearRecordedPendingRRefsOnError();
|
| 203 |
+
|
| 204 |
+
void delUser(
|
| 205 |
+
const worker_id_t owner,
|
| 206 |
+
const RRefId& rrefId,
|
| 207 |
+
const ForkId& forkId);
|
| 208 |
+
void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis);
|
| 209 |
+
|
| 210 |
+
std::unordered_map<std::string, std::string> getDebugInfo();
|
| 211 |
+
|
| 212 |
+
private:
|
| 213 |
+
struct PendingUserState {
|
| 214 |
+
PendingUserState(c10::intrusive_ptr<RRef> rref)
|
| 215 |
+
: rref_(std::move(rref)),
|
| 216 |
+
confirmationFuture_(c10::make_intrusive<JitFuture>(BoolType::get())) {
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
inline void confirm() {
|
| 220 |
+
c10::static_intrusive_pointer_cast<UserRRef>(rref_)->confirm();
|
| 221 |
+
confirmationFuture_->markCompleted();
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
c10::intrusive_ptr<RRef> rref_;
|
| 225 |
+
// Use Future.wait() and Future.markCompleted() to block and unblock user
|
| 226 |
+
// functions. The bool value wrapped by the future_ is not used.
|
| 227 |
+
c10::intrusive_ptr<JitFuture> confirmationFuture_;
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
RRefContext(std::shared_ptr<RpcAgent>);
|
| 231 |
+
|
| 232 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 233 |
+
worker_id_t ownerId,
|
| 234 |
+
const RRefId& rrefId,
|
| 235 |
+
const ForkId& forkId,
|
| 236 |
+
const TypePtr& type);
|
| 237 |
+
|
| 238 |
+
void finishForkRequest(const ForkId& forkId, worker_id_t parent);
|
| 239 |
+
|
| 240 |
+
// If there is any leak on any RRef, this method will throw an error.
|
| 241 |
+
void checkRRefLeaks(bool ignoreRRefLeak);
|
| 242 |
+
|
| 243 |
+
static std::atomic<local_id_t> nextLocalId_;
|
| 244 |
+
|
| 245 |
+
const std::shared_ptr<RpcAgent> agent_;
|
| 246 |
+
mutable std::mutex mutex_;
|
| 247 |
+
// Keep OwnerRRefs alive until there is no living UserRRefs.
|
| 248 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<RRef>, RRefId::Hash> owners_;
|
| 249 |
+
// A map to track OwnerRRefs that are requested but not yet created. This can
|
| 250 |
+
// happen if the to_here() message is processed on the owner before the
|
| 251 |
+
// corresponding creator rpc.remote() message. If this happens, instead of
|
| 252 |
+
// to_here() RPC thread to block waiting for the OwnerRRef creation, the
|
| 253 |
+
// RRefContext returns a Future, so that the RPC request processing logic can
|
| 254 |
+
// attach subsequent code as a callback to that Future.
|
| 255 |
+
// NB: the OwnerRRefs in this map must be cleared when the corresponding
|
| 256 |
+
// OwnerRRef is created. Note that the values in this map are intrusive_ptrs
|
| 257 |
+
// to c10::ivalue::Future that will be marked completed with the owner RRef.
|
| 258 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<JitFuture>, RRefId::Hash>
|
| 259 |
+
pendingOwners_;
|
| 260 |
+
// Tracks known living UserRRefs of an OwnerRRef
|
| 261 |
+
std::unordered_map<
|
| 262 |
+
RRefId,
|
| 263 |
+
std::unordered_set<ForkId, ForkId::Hash>,
|
| 264 |
+
RRefId::Hash>
|
| 265 |
+
forks_;
|
| 266 |
+
|
| 267 |
+
// This cond var is used by deleteAllUsers(), a event notification is sent if
|
| 268 |
+
// number of pending UserRRef or UserRRef children is reduced, or
|
| 269 |
+
// number of owned OwnerRRef is reduced.
|
| 270 |
+
std::condition_variable deleteAllUsersCV_;
|
| 271 |
+
  // The following 3 maps keep UserRRefs alive by holding an intrusive_ptr to the
|
| 272 |
+
// RRef instances. A UserRRef must be added into this map if any of the
|
| 273 |
+
// following two conditions is true:
|
| 274 |
+
//
|
| 275 |
+
// (1) A UserRRef has not been accepted by owner yet.
|
| 276 |
+
//
|
| 277 |
+
// It can be used or shared, but cannot be deleted, and hence kept alive
|
| 278 |
+
// in this map. A message of type RREF_USER_ACCEPT will move the
|
| 279 |
+
// corresponding RRef from pendingUsers_ map to confirmedUsers_ map.
|
| 280 |
+
std::unordered_map<ForkId, std::shared_ptr<PendingUserState>, ForkId::Hash>
|
| 281 |
+
pendingUsers_;
|
| 282 |
+
// UserRRefs are added into this map when it is confirmed by the owner.
|
| 283 |
+
// When destroying RRefContext this map helps to find local UserRRefs
|
| 284 |
+
// and send delete messages if they are still not deleted by Python
|
| 285 |
+
// garbage collection.
|
| 286 |
+
std::unordered_map<ForkId, c10::weak_intrusive_ptr<RRef>, ForkId::Hash>
|
| 287 |
+
confirmedUsers_;
|
| 288 |
+
|
| 289 |
+
// (2) A UserRRef has forked a child UserRRef which has not been accepted by
|
| 290 |
+
// the owner yet.
|
| 291 |
+
//
|
| 292 |
+
// In this case, this UserRRef cannot send out RREF_USER_DELETE message,
|
| 293 |
+
// as it could potentially trigger the OwnerRRef been deleted before the
|
| 294 |
+
// owner learns about the forked child.
|
| 295 |
+
std::unordered_map<ForkId, c10::intrusive_ptr<RRef>, ForkId::Hash>
|
| 296 |
+
pendingChildren_;
|
| 297 |
+
|
| 298 |
+
// The RRef context performs its operations through async RPC requests, in
|
| 299 |
+
// order to not block the user code. Therefore the RRef context's state may be
|
| 300 |
+
// lagging a bit behind what it is intended to be, while it waits for these
|
| 301 |
+
// requests to complete. To allow syncing when needed, we store the count of
|
| 302 |
+
// these pending requests, so that users can wait for it to reach zero.
|
| 303 |
+
std::atomic<int64_t> numPendingFutures_{0};
|
| 304 |
+
|
| 305 |
+
std::mutex destroyedMutex_;
|
| 306 |
+
bool destroyed_{false};
|
| 307 |
+
|
| 308 |
+
// Thread local states to keep UserRRefs deserialized from user function
|
| 309 |
+
// arguments.
|
| 310 |
+
static thread_local std::vector<std::shared_ptr<PendingUserState>> userTable_;
|
| 311 |
+
// A flag indicating whether subsequently created UserRRefs should be added to
|
| 312 |
+
// the thread_local userTable_. The flag is set to true before serializing
|
| 313 |
+
// RPC arguments and then set to false before running the corresponding
|
| 314 |
+
// user code. See addPendingUser and delPendingUser for more details.
|
| 315 |
+
// NB: The reason for having this flag is because addPendingUser are called in
|
| 316 |
+
// two cases, and we only want to track the 2nd case.
|
| 317 |
+
// (1) RRef as the return value: when calling rpc.remote, the UserRRef on the
|
| 318 |
+
// caller side is added to the context using addPendingUser.
|
| 319 |
+
// (2) RRef as an argument: When running an RPC using RRefs as arguments, the
|
| 320 |
+
// RRef is forwarded to the callee as new UserRRefs (if the callee is not
|
| 321 |
+
// the owner). In this case, we block running the user function until all
|
| 322 |
+
// UserRRefs are confirmed by the owner.
|
| 323 |
+
  // This contract guarantees that no UserRRefs can be used remotely without
|
| 324 |
+
// confirmation. Note that, however, the UserRRef created by rpc.remote can
|
| 325 |
+
// still be passed to local functions as arguments and used there. This is by
|
| 326 |
+
// design, because this feature is especially useful when, say a master node
|
| 327 |
+
// creates multiple UserRRefs in a loop and then shares them with other nodes.
|
| 328 |
+
// Blocking every iteration in the loop until RRefs are confirmed will slow
|
| 329 |
+
// this down. This nuance on UserRRef can be interpreted as we only make
|
| 330 |
+
// exceptions for UserRRef creators. And using the UserRRef on its creator
|
| 331 |
+
// without confirmation is OK, because the creator would either call to_here
|
| 332 |
+
// or forward the UserRRef, and both would then require confirmations from the
|
| 333 |
+
// owner.
|
| 334 |
+
static thread_local bool recording_;
|
| 335 |
+
};
|
| 336 |
+
|
| 337 |
+
} // namespace rpc
|
| 338 |
+
} // namespace distributed
|
| 339 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 7 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace distributed {
|
| 12 |
+
namespace rpc {
|
| 13 |
+
|
| 14 |
+
// Temporary solution of RRef operations.
|
| 15 |
+
// TODO: Remove all these messages and use rpc + registered functions instead.
|
| 16 |
+
// Base class for all RRef control messages: carries the id of the RRef the
// message is about plus the concrete wire MessageType.
class TORCH_API RRefMessageBase : public RpcCommandBase {
 public:
  RRefMessageBase(const RRefId& rrefId, MessageType type)
      : rrefId_(rrefId), type_(type) {}

  // Id of the RRef this message refers to.
  const RRefId& rrefId();

 protected:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const RRefId rrefId_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const MessageType type_;
};
|
| 29 |
+
|
| 30 |
+
// Base class for RRef messages that additionally identify a specific fork
// (UserRRef) of the RRef via a ForkId.
class TORCH_API ForkMessageBase : public RRefMessageBase {
 public:
  ForkMessageBase(const RRefId& rrefId, const ForkId& forkId, MessageType type)
      : RRefMessageBase(rrefId, type), forkId_(forkId) {}

  // Id of the specific UserRRef fork this message refers to.
  const ForkId& forkId();

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  // Deserializes the (rrefId, forkId) pair from `message` of the given `type`.
  static std::pair<RRefId, ForkId> fromMessage(
      const Message& message,
      MessageType type);

 protected:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const ForkId forkId_;
};
|
| 46 |
+
|
| 47 |
+
// UserRRef uses this message to fetch the remote RRef value from the owner.
class TORCH_API ScriptRRefFetchCall final : public RRefMessageBase {
 public:
  ScriptRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
      : RRefMessageBase(rrefId, MessageType::SCRIPT_RREF_FETCH_CALL),
        fromWorkerId_(fromWorkerId) {}

  // Worker that issued the fetch request.
  inline worker_id_t fromWorkerId() const {
    return fromWorkerId_;
  }

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<ScriptRRefFetchCall> fromMessage(
      const Message& message);

 private:
  const worker_id_t fromWorkerId_;
};
|
| 65 |
+
|
| 66 |
+
// Python-side counterpart of ScriptRRefFetchCall: a UserRRef uses this message
// to fetch a Python RRef value from the owner.
class TORCH_API PythonRRefFetchCall final : public RRefMessageBase {
 public:
  PythonRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
      : RRefMessageBase(rrefId, MessageType::PYTHON_RREF_FETCH_CALL),
        fromWorkerId_(fromWorkerId) {}

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<PythonRRefFetchCall> fromMessage(
      const Message& message);

 private:
  // Worker that issued the fetch request.
  const worker_id_t fromWorkerId_;
};
|
| 79 |
+
|
| 80 |
+
// OwnerRRef uses this message to send the RRef value to a remote UserRRef.
class TORCH_API RRefFetchRet : public RpcCommandBase {
 public:
  RRefFetchRet(std::vector<at::IValue> values, MessageType type)
      : values_(std::move(values)), type_(type) {}

  // Serialized payload of the RRef value.
  const std::vector<at::IValue>& values();
  c10::intrusive_ptr<Message> toMessageImpl() && override;

 private:
  std::vector<at::IValue> values_;
  const MessageType type_;
};
|
| 93 |
+
|
| 94 |
+
// Fetch response carrying a TorchScript (IValue) RRef value.
class TORCH_API ScriptRRefFetchRet final : public RRefFetchRet {
 public:
  explicit ScriptRRefFetchRet(std::vector<at::IValue> values)
      : RRefFetchRet(std::move(values), MessageType::SCRIPT_RREF_FETCH_RET) {}

  static std::unique_ptr<ScriptRRefFetchRet> fromMessage(
      const Message& message);
};
|
| 102 |
+
|
| 103 |
+
// Fetch response carrying a Python RRef value.
class TORCH_API PythonRRefFetchRet final : public RRefFetchRet {
 public:
  explicit PythonRRefFetchRet(std::vector<at::IValue> values)
      : RRefFetchRet(std::move(values), MessageType::PYTHON_RREF_FETCH_RET) {}

  static std::unique_ptr<PythonRRefFetchRet> fromMessage(
      const Message& message);
};
|
| 111 |
+
|
| 112 |
+
// A UserRRef (regardless of whether it is the creator or not) uses this
// message to notify the OwnerRRef on delete.
class TORCH_API RRefUserDelete final : public ForkMessageBase {
 public:
  RRefUserDelete(const RRefId& rrefId, const ForkId& forkId)
      : ForkMessageBase(rrefId, forkId, MessageType::RREF_USER_DELETE) {}

  static std::unique_ptr<RRefUserDelete> fromMessage(const Message& message);
};
|
| 121 |
+
|
| 122 |
+
// Reply message for a remote() request; echoes the (rrefId, forkId) pair back
// to the caller. NOTE(review): exact handshake semantics are implemented in
// the request-callback code — confirm there.
class TORCH_API RemoteRet final : public ForkMessageBase {
 public:
  RemoteRet(const RRefId& rrefId, const ForkId& forkId)
      : ForkMessageBase(rrefId, forkId, MessageType::REMOTE_RET) {}

  static std::unique_ptr<RemoteRet> fromMessage(const Message& message);
};
|
| 129 |
+
|
| 130 |
+
// A child RRef uses this message to notify its parent that the child has been
// confirmed by the owner.
class TORCH_API RRefChildAccept final : public RpcCommandBase {
 public:
  explicit RRefChildAccept(const ForkId& forkId) : forkId_(forkId) {}

  // Fork id of the child that was confirmed.
  const ForkId& forkId() const;

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<RRefChildAccept> fromMessage(const Message& message);

 private:
  const ForkId forkId_;
};
|
| 144 |
+
|
| 145 |
+
// A child RRef uses this message to send a fork request to the owner.
class TORCH_API RRefForkRequest final : public ForkMessageBase {
 public:
  RRefForkRequest(const RRefId& rrefId, const ForkId& forkId)
      : ForkMessageBase(rrefId, forkId, MessageType::RREF_FORK_REQUEST) {}

  // Deserializes a fork request from the wire format.
  static std::unique_ptr<RRefForkRequest> fromMessage(const Message& message);
};
|
| 153 |
+
|
| 154 |
+
// Generic acknowledgement for RRef bookkeeping messages; carries no payload.
class TORCH_API RRefAck final : public RpcCommandBase {
 public:
  RRefAck() = default;

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<RRefAck> fromMessage(const Message& message);
};
|
| 161 |
+
|
| 162 |
+
} // namespace rpc
|
| 163 |
+
} // namespace distributed
|
| 164 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/script_call.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
namespace torch {
|
| 10 |
+
namespace distributed {
|
| 11 |
+
namespace rpc {
|
| 12 |
+
|
| 13 |
+
using torch::jit::Operator;
|
| 14 |
+
|
| 15 |
+
// A ScriptRemoteCall instance represents an invocation of `dist.remote` on a
// builtin operator. Currently, it does not support using RRef as arguments yet.
// Besides the operator and a vector of arguments, ScriptRemoteCall also
// contains the RRefId and the ForkId of the return value RRef.
class TORCH_API ScriptRemoteCall final : public ScriptCall {
 public:
  // Constructor for builtin operator call.
  ScriptRemoteCall(
      std::shared_ptr<Operator> op,
      std::vector<at::IValue>&& stack,
      const RRefId& retRRefId,
      const ForkId& retForkId);

  // Constructor for TorchScript function call.
  ScriptRemoteCall(
      const c10::QualifiedName& qualifiedName,
      std::vector<at::IValue>&& stack,
      const RRefId& retRRefId,
      const ForkId& retForkId,
      const bool isAsyncExecution);

  // Id of the RRef that will hold the return value on the owner.
  inline const RRefId& retRRefId() const {
    return retRRefId_;
  }

  // Id of the caller-side fork of the return value RRef.
  inline const ForkId& retForkId() const {
    return retForkId_;
  }

  static std::unique_ptr<ScriptRemoteCall> fromIValues(
      std::vector<at::IValue>& ivalues);

  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<ScriptRemoteCall> fromMessage(const Message& message);

 private:
  const RRefId retRRefId_;
  const ForkId retForkId_;
};
|
| 54 |
+
|
| 55 |
+
} // namespace rpc
|
| 56 |
+
} // namespace distributed
|
| 57 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/unpickled_python_call.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace distributed {
|
| 10 |
+
namespace rpc {
|
| 11 |
+
|
| 12 |
+
// This class converts the content in a PythonRemoteCall into py::object. This
// is a helper class to make sure that all arguments deserialization is done
// before entering RequestCallbackImpl::processRpc(...), so that the
// deserialization related logic can be carried out in one spot instead of
// scattered in multiple places for different message types.
// NB: The reason for not consolidating this class into PythonRemoteCall is
// because PythonRemoteCall is a libtorch type which should not depend on
// Python types.
class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall {
 public:
  explicit UnpickledPythonRemoteCall(
      const SerializedPyObj& serializedPyObj,
      const at::IValue& retRRefId,
      const at::IValue& retForkId,
      const bool isAsyncExecution);

  // Id of the RRef holding the return value, and the caller's fork of it.
  const RRefId& rrefId() const;
  const ForkId& forkId() const;

 private:
  RRefId rrefId_;
  ForkId forkId_;
};
|
| 34 |
+
|
| 35 |
+
} // namespace rpc
|
| 36 |
+
} // namespace distributed
|
| 37 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/utils/pybind.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace throughput_benchmark {
|
| 7 |
+
|
| 8 |
+
// Installs the throughput-benchmark Python bindings on the given module.
void initThroughputBenchmarkBindings(PyObject* module);
|
| 9 |
+
|
| 10 |
+
} // namespace throughput_benchmark
|
| 11 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
|
| 9 |
+
// Builds a human-readable error message explaining why the given args/kwargs
// did not match `function_name`. `options` presumably lists the acceptable
// signatures — confirm at call sites.
std::string format_invalid_args(
    PyObject* given_args,
    PyObject* given_kwargs,
    const std::string& function_name,
    const std::vector<std::string>& options);
|
| 14 |
+
|
| 15 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <torch/csrc/utils/python_arg_parser.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace utils {
|
| 10 |
+
|
| 11 |
+
// Python-facing nested-tensor constructor; the Python arguments arrive
// pre-parsed in `r`.
at::Tensor nested_tensor_ctor(
    c10::DispatchKey dispatch_key,
    at::ScalarType scalar_type,
    PythonArgs& r);
|
| 15 |
+
|
| 16 |
+
} // namespace utils
|
| 17 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
#ifdef USE_NUMPY
|
| 6 |
+
|
| 7 |
+
#if !defined(NO_IMPORT_ARRAY) && !defined(WITH_NUMPY_IMPORT_ARRAY)
|
| 8 |
+
#define NO_IMPORT_ARRAY
|
| 9 |
+
#endif
|
| 10 |
+
|
| 11 |
+
#ifndef PY_ARRAY_UNIQUE_SYMBOL
|
| 12 |
+
#define PY_ARRAY_UNIQUE_SYMBOL __numpy_array_api
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#ifndef NPY_NO_DEPRECATED_API
|
| 16 |
+
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <numpy/arrayobject.h>
|
| 20 |
+
|
| 21 |
+
#endif // USE_NUMPY
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_numbers.h
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Exceptions.h>
|
| 4 |
+
#include <torch/csrc/jit/frontend/tracer.h>
|
| 5 |
+
#include <torch/csrc/python_headers.h>
|
| 6 |
+
#include <torch/csrc/utils/object_ptr.h>
|
| 7 |
+
#include <torch/csrc/utils/tensor_numpy.h>
|
| 8 |
+
#include <cstdint>
|
| 9 |
+
#include <limits>
|
| 10 |
+
#include <stdexcept>
|
| 11 |
+
|
| 12 |
+
// largest integer that can be represented consecutively in a double
|
| 13 |
+
const int64_t DOUBLE_INT_MAX = 9007199254740992;
|
| 14 |
+
|
| 15 |
+
// Wraps a C int32 as a new Python int object (returns a new reference).
inline PyObject* THPUtils_packInt32(int32_t value) {
  return PyLong_FromLong(value);
}
|
| 18 |
+
|
| 19 |
+
// Wraps a C int64 as a new Python int object (returns a new reference).
inline PyObject* THPUtils_packInt64(int64_t value) {
  return PyLong_FromLongLong(value);
}
|
| 22 |
+
|
| 23 |
+
// Wraps a C uint32 as a new Python int object (returns a new reference).
inline PyObject* THPUtils_packUInt32(uint32_t value) {
  return PyLong_FromUnsignedLong(value);
}
|
| 26 |
+
|
| 27 |
+
// Wraps a C uint64 as a new Python int object (returns a new reference).
inline PyObject* THPUtils_packUInt64(uint64_t value) {
  return PyLong_FromUnsignedLongLong(value);
}
|
| 30 |
+
|
| 31 |
+
// Converts a double to a Python int via PyLong_FromDouble (returns a new
// reference).
inline PyObject* THPUtils_packDoubleAsInt(double value) {
  return PyLong_FromDouble(value);
}
|
| 34 |
+
|
| 35 |
+
// True only for exact `int` instances; bools are explicitly rejected even
// though bool subclasses int.
inline bool THPUtils_checkLongExact(PyObject* obj) {
  return PyLong_CheckExact(obj) && !PyBool_Check(obj);
}
|
| 38 |
+
|
| 39 |
+
// True when obj is a Python int (subclasses included, bool excluded), or a
// NumPy integer scalar when NumPy support is compiled in.
inline bool THPUtils_checkLong(PyObject* obj) {
  // Exact-type check first: the common case, and the cheapest.
  if (THPUtils_checkLongExact(obj)) {
    return true;
  }

#ifdef USE_NUMPY
  if (torch::utils::is_numpy_int(obj)) {
    return true;
  }
#endif

  // Accept int subclasses, but never bool.
  if (PyBool_Check(obj)) {
    return false;
  }
  return PyLong_Check(obj) != 0;
}
|
| 53 |
+
|
| 54 |
+
// Unpacks a Python int as int32. Throws python_error if obj is not
// convertible, and std::runtime_error if the value does not fit in 32 bits.
inline int32_t THPUtils_unpackInt(PyObject* obj) {
  int did_overflow = 0;
  const long raw = PyLong_AsLongAndOverflow(obj, &did_overflow);
  // -1 is also a legal value, so a pending Python error must be checked.
  if (raw == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  if (did_overflow != 0 || raw > std::numeric_limits<int32_t>::max() ||
      raw < std::numeric_limits<int32_t>::min()) {
    throw std::runtime_error("Overflow when unpacking long");
  }
  return static_cast<int32_t>(raw);
}
|
| 70 |
+
|
| 71 |
+
// Unpacks a Python int as int64. Throws python_error if obj is not
// convertible, and std::runtime_error on 64-bit overflow.
inline int64_t THPUtils_unpackLong(PyObject* obj) {
  int did_overflow = 0;
  const long long raw = PyLong_AsLongLongAndOverflow(obj, &did_overflow);
  // -1 is also a legal value, so a pending Python error must be checked.
  if (raw == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  if (did_overflow != 0) {
    throw std::runtime_error("Overflow when unpacking long");
  }
  return static_cast<int64_t>(raw);
}
|
| 83 |
+
|
| 84 |
+
// Unpacks a Python int as uint32. Throws python_error if obj is not
// convertible (or negative), and std::runtime_error if it exceeds 32 bits.
inline uint32_t THPUtils_unpackUInt32(PyObject* obj) {
  const unsigned long raw = PyLong_AsUnsignedLong(obj);
  if (PyErr_Occurred()) {
    throw python_error();
  }
  if (raw > std::numeric_limits<uint32_t>::max()) {
    throw std::runtime_error("Overflow when unpacking unsigned long");
  }
  return static_cast<uint32_t>(raw);
}
|
| 94 |
+
|
| 95 |
+
// Unpacks a Python int as uint64. Throws python_error if obj is not
// convertible (or negative).
inline uint64_t THPUtils_unpackUInt64(PyObject* obj) {
  const unsigned long long raw = PyLong_AsUnsignedLongLong(obj);
  if (PyErr_Occurred()) {
    throw python_error();
  }
  return static_cast<uint64_t>(raw);
}
|
| 102 |
+
|
| 103 |
+
// True if obj is usable as an integer index (companion to
// THPUtils_unpackIndex below); defined out-of-line.
bool THPUtils_checkIndex(PyObject* obj);
|
| 104 |
+
|
| 105 |
+
// Unpacks obj as an int64 index. Plain ints are unpacked directly; any other
// object is first converted through its __index__ protocol.
inline int64_t THPUtils_unpackIndex(PyObject* obj) {
  if (THPUtils_checkLong(obj)) {
    return THPUtils_unpackLong(obj);
  }
  // Not an int: ask the object for its __index__ result.
  auto index = THPObjectPtr(PyNumber_Index(obj));
  if (index == nullptr) {
    throw python_error();
  }
  // Unpack while `index` still owns the reference to the temporary int.
  return THPUtils_unpackLong(index.get());
}
|
| 117 |
+
|
| 118 |
+
// Unpacks exactly Py_True/Py_False; anything else (including truthy objects)
// raises std::runtime_error.
inline bool THPUtils_unpackBool(PyObject* obj) {
  if (obj == Py_False) {
    return false;
  }
  if (obj == Py_True) {
    return true;
  }
  throw std::runtime_error("couldn't convert python object to boolean");
}
|
| 127 |
+
|
| 128 |
+
// True if obj is a Python bool, or a NumPy bool scalar when NumPy support is
// compiled in.
inline bool THPUtils_checkBool(PyObject* obj) {
#ifdef USE_NUMPY
  if (torch::utils::is_numpy_bool(obj)) {
    return true;
  }
#endif
  return PyBool_Check(obj);
}
|
| 136 |
+
|
| 137 |
+
// True if obj can be interpreted as a double: a Python float or int, or any
// NumPy scalar when NumPy support is compiled in.
inline bool THPUtils_checkDouble(PyObject* obj) {
#ifdef USE_NUMPY
  if (torch::utils::is_numpy_scalar(obj)) {
    return true;
  }
#endif
  return PyFloat_Check(obj) || PyLong_Check(obj);
}
|
| 145 |
+
|
| 146 |
+
// Unpacks obj as a C double. Float objects are read directly; anything else
// goes through PyFloat_AsDouble (which may raise, surfaced as python_error).
inline double THPUtils_unpackDouble(PyObject* obj) {
  if (PyFloat_Check(obj)) {
    // Fast path: read the float's value without error handling.
    return PyFloat_AS_DOUBLE(obj);
  }
  const double converted = PyFloat_AsDouble(obj);
  // -1 is also a legal value, so a pending Python error must be checked.
  if (converted == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  return converted;
}
|
| 156 |
+
|
| 157 |
+
// Unpacks obj as a c10 complex double via CPython's complex conversion.
inline c10::complex<double> THPUtils_unpackComplexDouble(PyObject* obj) {
  const Py_complex converted = PyComplex_AsCComplex(obj);
  // A real part of -1.0 is CPython's error sentinel; confirm with
  // PyErr_Occurred before treating it as a failure.
  if (converted.real == -1.0 && PyErr_Occurred()) {
    throw python_error();
  }
  return c10::complex<double>(converted.real, converted.imag);
}
|
| 165 |
+
|
| 166 |
+
// Interprets a Python number (float, complex, or int) as a boolean using
// numeric truthiness: nonzero means true.
inline bool THPUtils_unpackNumberAsBool(PyObject* obj) {
  if (PyFloat_Check(obj)) {
    return static_cast<bool>(PyFloat_AS_DOUBLE(obj));
  }

  if (PyComplex_Check(obj)) {
    // A complex number is truthy unless both components are exactly zero.
    const double real_val = PyComplex_RealAsDouble(obj);
    const double imag_val = PyComplex_ImagAsDouble(obj);
    return real_val != 0 || imag_val != 0;
  }

  int did_overflow = 0;
  const long long raw = PyLong_AsLongLongAndOverflow(obj, &did_overflow);
  if (raw == -1 && PyErr_Occurred()) {
    throw python_error();
  }
  // Overflow is deliberately ignored: when overflow occurred the value is
  // certainly nonzero, so returning its truthiness matches NumPy's behavior.
  return static_cast<bool>(raw);
}
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_raii.h
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <c10/util/Optional.h>
|
| 2 |
+
#include <torch/csrc/utils/pybind.h>
|
| 3 |
+
#include <tuple>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace impl {
|
| 7 |
+
|
| 8 |
+
// Adapts a C++ RAII guard type to Python's context-manager protocol: the
// guard is constructed on __enter__ and destroyed on __exit__, instead of on
// C++ scope exit.
template <typename GuardT, typename... Args>
struct RAIIContextManager {
  // Stores the guard's constructor arguments; the guard itself is not
  // created until enter() runs.
  explicit RAIIContextManager(Args&&... args)
      : args_(std::forward<Args>(args)...) {}

  // Constructs the guard in place from the stored arguments.
  void enter() {
    auto emplace = [&](Args... args) {
      guard_.emplace(std::forward<Args>(args)...);
    };
    std::apply(std::move(emplace), args_);
  }

  // Destroys the guard, releasing whatever resource it holds.
  void exit() {
    guard_ = c10::nullopt;
  }

 private:
  c10::optional<GuardT> guard_;
  std::tuple<Args...> args_;
};
|
| 28 |
+
|
| 29 |
+
// Turns a C++ RAII guard into a Python context manager.
// See _ExcludeDispatchKeyGuard in python_dispatch.cpp for example.
template <typename GuardT, typename... GuardArgs>
void py_context_manager(const py::module& m, const char* name) {
  using ContextManagerT = RAIIContextManager<GuardT, GuardArgs...>;
  py::class_<ContextManagerT>(m, name)
      .def(py::init<GuardArgs...>())
      .def("__enter__", [](ContextManagerT& guard) { guard.enter(); })
      .def(
          "__exit__",
          // Exception info is accepted (per the protocol) but ignored: the
          // guard is torn down unconditionally and any exception propagates.
          [](ContextManagerT& guard,
             py::object exc_type,
             py::object exc_value,
             py::object traceback) { guard.exit(); });
}
|
| 44 |
+
|
| 45 |
+
// Legacy variant: the guard is acquired immediately on construction
// ("Python RAII guard" style) rather than on __enter__.
template <typename GuardT, typename... Args>
struct DeprecatedRAIIContextManager {
  // Acquires the guard right away, directly from the constructor arguments.
  explicit DeprecatedRAIIContextManager(Args&&... args) {
    guard_.emplace(std::forward<Args>(args)...);
  }

  // No-op: the resource was already acquired in the constructor.
  void enter() {}

  // Releases the guard early; otherwise it is released whenever Python
  // garbage-collects this object.
  void exit() {
    guard_ = c10::nullopt;
  }

 private:
  c10::optional<GuardT> guard_;
  // NOTE(review): args_ appears unused here (the guard is built directly in
  // the constructor) — looks like a leftover from RAIIContextManager; confirm
  // and consider removing.
  std::tuple<Args...> args_;
};
|
| 61 |
+
|
| 62 |
+
// Definition: a "Python RAII guard" is an object in Python that acquires
// a resource on init and releases the resource on deletion.
//
// This API turns a C++ RAII guard into an object that can be used either as a
// Python context manager or as a "Python RAII guard".
//
// Please prefer `py_context_manager` to this API if you are binding a new
// RAII guard into Python because "Python RAII guards" don't work as expected
// in Python (Python makes no guarantees about when an object gets deleted)
template <typename GuardT, typename... GuardArgs>
void py_context_manager_DEPRECATED(const py::module& m, const char* name) {
  using ContextManagerT = DeprecatedRAIIContextManager<GuardT, GuardArgs...>;
  py::class_<ContextManagerT>(m, name)
      .def(py::init<GuardArgs...>())
      .def("__enter__", [](ContextManagerT& guard) { guard.enter(); })
      .def(
          "__exit__",
          // Exception info is accepted (per the protocol) but ignored.
          [](ContextManagerT& guard,
             py::object exc_type,
             py::object exc_value,
             py::object traceback) { guard.exit(); });
}
|
| 84 |
+
|
| 85 |
+
} // namespace impl
|
| 86 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_strings.h
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <torch/csrc/utils/object_ptr.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
#include <stdexcept>
|
| 7 |
+
#include <string>
|
| 8 |
+
|
| 9 |
+
// Utilities for handling Python strings. Note that PyString, when defined, is
|
| 10 |
+
// the same as PyBytes.
|
| 11 |
+
|
| 12 |
+
// Returns true if obj is a bytes/str or unicode object
|
| 13 |
+
// As of Python 3.6, this does not require the GIL
|
| 14 |
+
inline bool THPUtils_checkString(PyObject* obj) {
|
| 15 |
+
return PyBytes_Check(obj) || PyUnicode_Check(obj);
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
// Unpacks PyBytes (PyString) or PyUnicode as std::string
|
| 19 |
+
// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8.
|
| 20 |
+
// NOTE: this method requires the GIL
|
| 21 |
+
inline std::string THPUtils_unpackString(PyObject* obj) {
|
| 22 |
+
if (PyBytes_Check(obj)) {
|
| 23 |
+
size_t size = PyBytes_GET_SIZE(obj);
|
| 24 |
+
return std::string(PyBytes_AS_STRING(obj), size);
|
| 25 |
+
}
|
| 26 |
+
if (PyUnicode_Check(obj)) {
|
| 27 |
+
Py_ssize_t size = 0;
|
| 28 |
+
const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
|
| 29 |
+
if (!data) {
|
| 30 |
+
throw std::runtime_error("error unpacking string as utf-8");
|
| 31 |
+
}
|
| 32 |
+
return std::string(data, (size_t)size);
|
| 33 |
+
}
|
| 34 |
+
throw std::runtime_error("unpackString: expected bytes or unicode object");
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
// Unpacks PyBytes (PyString) or PyUnicode as c10::string_view
|
| 38 |
+
// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8.
|
| 39 |
+
// NOTE: If `obj` is destroyed, then the non-owning c10::string_view will
|
| 40 |
+
// become invalid. If the string needs to be accessed at any point after
|
| 41 |
+
// `obj` is destroyed, then the c10::string_view should be copied into
|
| 42 |
+
// a std::string, or another owning object, and kept alive. For an example,
|
| 43 |
+
// look at how IValue and autograd nodes handle c10::string_view arguments.
|
| 44 |
+
// NOTE: this method requires the GIL
|
| 45 |
+
inline c10::string_view THPUtils_unpackStringView(PyObject* obj) {
|
| 46 |
+
if (PyBytes_Check(obj)) {
|
| 47 |
+
size_t size = PyBytes_GET_SIZE(obj);
|
| 48 |
+
return c10::string_view(PyBytes_AS_STRING(obj), size);
|
| 49 |
+
}
|
| 50 |
+
if (PyUnicode_Check(obj)) {
|
| 51 |
+
Py_ssize_t size = 0;
|
| 52 |
+
const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
|
| 53 |
+
if (!data) {
|
| 54 |
+
throw std::runtime_error("error unpacking string as utf-8");
|
| 55 |
+
}
|
| 56 |
+
return c10::string_view(data, (size_t)size);
|
| 57 |
+
}
|
| 58 |
+
throw std::runtime_error("unpackString: expected bytes or unicode object");
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
// Packs a NUL-terminated C string into a new PyUnicode object (decoded as
// UTF-8). Returns a new reference, or nullptr with a Python error set.
inline PyObject* THPUtils_packString(const char* str) {
  return PyUnicode_FromString(str);
}
|
| 64 |
+
|
| 65 |
+
// Packs a std::string into a new PyUnicode object, decoded as UTF-8.
// Using the explicit size allows embedded NULs up to str.size().
// Returns a new reference, or nullptr with a Python error set.
inline PyObject* THPUtils_packString(const std::string& str) {
  return PyUnicode_FromStringAndSize(str.c_str(), str.size());
}
|
| 68 |
+
|
| 69 |
+
// Creates an interned unicode object from `str` (UTF-8 decoded).
// Returns a new reference, or nullptr with a Python error set.
inline PyObject* THPUtils_internString(const std::string& str) {
  return PyUnicode_InternFromString(str.c_str());
}
|
| 72 |
+
|
| 73 |
+
// Precondition: THPUtils_checkString(obj) must be true
// Returns true when the unicode object has already been interned.
inline bool THPUtils_isInterned(PyObject* obj) {
  return PyUnicode_CHECK_INTERNED(obj);
}
|
| 77 |
+
|
| 78 |
+
// Precondition: THPUtils_checkString(obj) must be true
// Interns `*obj` in place: `*obj` may be replaced with the canonical
// interned object (the reference is swapped by CPython).
inline void THPUtils_internStringInPlace(PyObject** obj) {
  PyUnicode_InternInPlace(obj);
}
|
| 82 |
+
|
| 83 |
+
/*
 * Reference:
 * https://github.com/numpy/numpy/blob/f4c497c768e0646df740b647782df463825bfd27/numpy/core/src/common/get_attr_string.h#L42
 *
 * Stripped-down version of PyObject_GetAttrString:
 * avoids lookups for None, tuple, and List objects,
 * and doesn't create a PyErr since this code ignores it.
 *
 * This can be much faster than PyObject_GetAttrString where
 * exceptions are not used by the caller.
 *
 * 'obj' is the object to search for the attribute.
 *
 * 'name' is the attribute to search for.
 *
 * Returns a py::object wrapping the return value. If the attribute lookup
 * failed, the wrapped value will be NULL (no Python error is left set).
 */

// NOLINTNEXTLINE(clang-diagnostic-unused-function)
static py::object PyObject_FastGetAttrString(PyObject* obj, const char* name) {
  PyTypeObject* tp = Py_TYPE(obj);
  PyObject* res = (PyObject*)nullptr;

  /* Attribute referenced by (char *)name */
  if (tp->tp_getattr != nullptr) {
    // This is OK per https://bugs.python.org/issue39620
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
    res = (*tp->tp_getattr)(obj, const_cast<char*>(name));
    if (res == nullptr) {
      // Swallow the lookup failure; callers only test the result for null.
      PyErr_Clear();
    }
  }
  /* Attribute referenced by (PyObject *)name */
  else if (tp->tp_getattro != nullptr) {
    // Intern the attribute name before the tp_getattro call (matches what
    // CPython's own attribute machinery does for identifier lookups).
    auto w = py::reinterpret_steal<py::object>(THPUtils_internString(name));
    if (w.ptr() == nullptr) {
      return py::object();
    }
    res = (*tp->tp_getattro)(obj, w.ptr());
    if (res == nullptr) {
      PyErr_Clear();
    }
  }
  // reinterpret_steal takes ownership of the new reference (or wraps null).
  return py::reinterpret_steal<py::object>(res);
}
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_stub.h
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// Forward declaration of CPython's `struct _object` so headers can mention
// PyObject* without pulling in <Python.h>.
struct _object;
using PyObject = _object;
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_torch_function_mode.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/PythonTorchFunctionTLS.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace overrides {
|
| 7 |
+
|
| 8 |
+
struct StashTorchFunctionModeGuard {
|
| 9 |
+
StashTorchFunctionModeGuard() {
|
| 10 |
+
cur_mode_ = at::impl::PythonTorchFunctionTLS::pop_stack();
|
| 11 |
+
}
|
| 12 |
+
~StashTorchFunctionModeGuard() {
|
| 13 |
+
at::impl::PythonTorchFunctionTLS::push_onto_stack(cur_mode_);
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
|
| 17 |
+
return cur_mode_;
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
private:
|
| 21 |
+
std::shared_ptr<c10::SafePyObject> cur_mode_;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
} // namespace overrides
|
| 25 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/schema_info.h
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/frontend/function_schema_parser.h>
|
| 4 |
+
#include <unordered_set>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace utils {
|
| 8 |
+
|
| 9 |
+
// A schema paired with the set of argument names that trigger its special
// case handling.
using SchemaSpecialCasePair =
    std::pair<c10::FunctionSchema, std::unordered_set<std::string>>;
/**
 * class SchemaInfo
 *
 * FunctionSchema wrapper that publicizes argument value specific operator
 * behavior (mutation, aliasing, special cases, etc...)
 */

struct TORCH_API SchemaInfo {
 public:
  // Wrap an already-parsed schema. Alias maps are computed lazily
  // (alias_maps_current_ starts false).
  explicit SchemaInfo(c10::FunctionSchema schema)
      : schema_(std::move(schema)),
        alias_maps_current_(false),
        has_init_(false) {}
  // Parse a JIT schema string and wrap the result.
  explicit SchemaInfo(const char* signature)
      : schema_(torch::jit::parseSchema(signature)),
        alias_maps_current_(false),
        has_init_(false) {}

  // Whether any argument of this schema may be mutated.
  bool is_mutable();

  // Whether the given argument may be mutated.
  bool is_mutable(const c10::SchemaArgument& argument);

  // Whether the input argument with the given name may be mutated.
  bool is_mutable(c10::string_view name);

  // Whether the schema has an argument with the given name.
  bool has_argument(c10::string_view name);

  bool is_nondeterministic() const;

  // Returns whether lhs and rhs may alias directly.
  // This does not account for cases where lhs or rhs are a container that
  // may contain elements that alias the other argument.
  // Besides the checks already included in FunctionSchema::may_alias, this
  // method also accounts for special aliasing cases caused by aliasing
  // argument values supplied from addArgumentValue.
  bool may_alias(
      const c10::SchemaArgument& lhs,
      const c10::SchemaArgument& rhs);

  // Returns whether lhs and rhs may alias directly or whether lhs/rhs are a
  // container that may contain elements that alias the other argument. Besides
  // the checks already included in FunctionSchema::may_contain_alias, this
  // method also accounts for special aliasing cases caused by aliasing
  // argument values supplied from addArgumentValue. bidirectional = false only
  // returns whether lhs may contain an alias of rhs while bidirectional = true
  // returns both directions.
  bool may_contain_alias(
      const c10::SchemaArgument& lhs,
      const c10::SchemaArgument& rhs,
      bool bidirectional = true);

  // Record a concrete value for the named argument; later aliasing/mutation
  // queries take these values into account.
  void addArgumentValue(const std::string& name, const at::IValue& value);

  // Positional form: entry i (when present) is the value of argument i.
  void addArgumentValues(
      const std::vector<c10::optional<at::IValue>>& value_list);

  void addArgumentValues(
      const std::unordered_map<std::string, at::IValue>& values);

  bool hasInputArgumentNamed(const std::string& name) const;

 private:
  // This function enforces more conservative results when the TORCH_WARN is
  // triggered from above due to duplicates in an argument list
  void ensureConservativity(
      const std::unordered_set<at::Symbol>& duplicates,
      const std::vector<c10::Argument>& arguments_list,
      c10::SchemaArgType type);

  void initSchemaInfo();

  void generateAliasMaps();

  bool mayContainAliasImpl(
      const c10::SchemaArgument& lhs,
      const c10::SchemaArgument& rhs);

  static std::vector<c10::FunctionSchema> getNonDeterministicOps();

  static std::vector<SchemaSpecialCasePair> getTrainingOps();

  const std::unordered_set<c10::SchemaArgument>& wildcardSet();

  const std::unordered_set<c10::SchemaArgument>& containerSet();

  // Set of all wildcard arguments
  std::unordered_set<c10::SchemaArgument> wildcard_set_;

  // Set of all container arguments
  std::unordered_set<c10::SchemaArgument> container_set_;

  // Map of argument IValues
  std::unordered_map<std::string, at::IValue> value_map_;

  // Alias map of inputs with each other
  std::vector<std::unordered_set<size_t>> input_alias_map_;

  // Alias map of outputs to inputs
  std::vector<std::unordered_set<size_t>> output_alias_map_;

  const c10::FunctionSchema schema_;

  // True once input/output alias maps reflect the current argument values.
  bool alias_maps_current_;

  // True once initSchemaInfo() has run.
  bool has_init_;
};
|
| 116 |
+
} // namespace utils
|
| 117 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/structseq.h
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
namespace utils {

// Builds the repr() for a PyStructSequence instance. Presumably returns a
// new reference (nullptr on error) -- confirm at the definition.
PyObject* returned_structseq_repr(PyStructSequence* obj);

} // namespace utils
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_list.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
// Forward declaration keeps this header independent of ATen's Tensor header.
namespace at {
class Tensor;
}

namespace torch {
namespace utils {

// Converts `tensor` to a Python list. Presumably nested per tensor
// dimension, like Tensor.tolist() -- confirm at the definition.
PyObject* tensor_to_list(const at::Tensor& tensor);

} // namespace utils
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_numpy.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
namespace utils {

// Conversions between at::Tensor and numpy arrays (declarations only; the
// behavior notes below are inferred from the signatures -- confirm at the
// definitions).

// Convert a tensor to a numpy array; `force` presumably permits a copying
// fallback when zero-copy sharing is impossible.
PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force = false);
// Build a tensor from a numpy array; `warn_if_not_writeable` controls the
// read-only-array warning.
at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable = true);

// Mapping between ATen scalar types and numpy dtype numbers.
int aten_to_numpy_dtype(const at::ScalarType scalar_type);
at::ScalarType numpy_dtype_to_aten(int dtype);

// True when numpy can be imported at runtime.
bool is_numpy_available();
// Type checks for numpy scalar objects.
bool is_numpy_int(PyObject* obj);
bool is_numpy_bool(PyObject* obj);
bool is_numpy_scalar(PyObject* obj);

void warn_numpy_not_writeable();
// Build a tensor from an object exposing __cuda_array_interface__.
at::Tensor tensor_from_cuda_array_interface(PyObject* obj);

// Detection/caching of a known numpy DLPack-deleter bug.
void validate_numpy_for_dlpack_deleter_bug();
bool is_numpy_dlpack_deleter_bugged();

} // namespace utils
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_types.h
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/DeprecatedTypeProperties.h>
|
| 4 |
+
#include <c10/core/TensorOptions.h>
|
| 5 |
+
#include <utility>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
namespace torch {
namespace utils {

// String round-trip for tensor type descriptions (presumably the legacy
// "torch.FloatTensor"-style names -- confirm at the definitions).
std::string options_to_string(const at::TensorOptions& options);
std::string type_to_string(const at::DeprecatedTypeProperties& type);
at::TensorOptions options_from_string(const std::string& str);

// return a vector of all "declared" types, even those that weren't compiled
std::vector<std::pair<at::Backend, at::ScalarType>> all_declared_types();

} // namespace utils
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark-inl.h
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <random>
|
| 4 |
+
#include <thread>
|
| 5 |
+
|
| 6 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 7 |
+
#include <torch/csrc/jit/python/pybind_utils.h>
|
| 8 |
+
#include <torch/csrc/utils/pybind.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/Parallel.h>
|
| 11 |
+
#include <c10/util/irange.h>
|
| 12 |
+
|
| 13 |
+
namespace torch {
|
| 14 |
+
namespace throughput_benchmark {
|
| 15 |
+
namespace detail {
|
| 16 |
+
|
| 17 |
+
// Runs the throughput benchmark: spawns config.num_calling_threads caller
// threads, has each perform its warm-up iterations, releases them all at
// once, and measures wall time until config.num_iters total iterations have
// been attempted. Returns aggregate latency statistics.
// Synchronization: `m` guards initialized/finished/start; the two condition
// variables implement a start barrier and a completion rendezvous.
template <class Input, class Output, class Model>
BenchmarkExecutionStats BenchmarkHelper<Input, Output, Model>::benchmark(
    const BenchmarkConfig& config) const {
  CHECK(initialized_);
  TORCH_CHECK(
      config.num_worker_threads == 1,
      "Only parallelization by callers is supported");

  LOG(INFO) << at::get_parallel_info();

  // We pre-generate inputs here for each of the threads. This allows us to
  // safely move inputs out for each of the threads independently and thus
  // avoid overhead from the benchmark runner itself.
  std::vector<std::vector<Input>> thread_inputs(config.num_calling_threads);
  std::vector<size_t> input_iters(config.num_calling_threads);
  {
    std::random_device seeder;
    std::mt19937 engine(seeder());
    TORCH_CHECK(
        !inputs_.empty(),
        "Please provide benchmark inputs."
        "Did you forget to call add_input()? ");
    std::uniform_int_distribution<int> dist(0, inputs_.size() - 1);

    for (const auto thread_id : c10::irange(config.num_calling_threads)) {
      // Just in case, we generate num_iters inputs for each of the threads.
      // This way, if one thread does all the work, we will still be fine.
      for (const auto i [[maybe_unused]] :
           c10::irange(config.num_iters + config.num_warmup_iters)) {
        thread_inputs[thread_id].push_back(cloneInput(inputs_[dist(engine)]));
      }
      input_iters[thread_id] = 0;
    }
  }

  std::mutex m;
  std::condition_variable worker_main_cv;
  std::condition_variable main_worker_cv;
  // TODO: add GUARDED_BY once it is available
  int64_t initialized{0};
  int64_t finished{0};
  bool start{false};
  std::atomic<int64_t> num_attempted_iters{0};
  std::vector<std::thread> callers;

  callers.reserve(config.num_calling_threads);
  for (const auto thread_id : c10::irange(config.num_calling_threads)) {
    callers.emplace_back([&, thread_id]() {
      // We use a condition variable as a barrier to make sure each thread
      // performs the required warm-up iterations before we start measuring.
      for (const auto j : c10::irange(config.num_warmup_iters)) {
        (void)j;
        runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]]));
        ++input_iters[thread_id];
      }
      {
        std::unique_lock<std::mutex> lock(m);
        ++initialized;
        worker_main_cv.notify_one();
        // NOLINTNEXTLINE(bugprone-infinite-loop)
        while (!start) {
          main_worker_cv.wait(lock);
        }
      }
      LOG(INFO) << "Starting forward thread " << thread_id;
      // fetch_add makes the threads collectively attempt exactly
      // config.num_iters measured iterations (plus one overshoot per thread
      // for the failed check).
      while (num_attempted_iters.fetch_add(1) < config.num_iters) {
        runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]]));
        ++input_iters[thread_id];
      }

      {
        std::unique_lock<std::mutex> lock(m);
        ++finished;
        worker_main_cv.notify_one();
        LOG(INFO) << "Shutting down forward thread " << thread_id
                  << ". Total number of finished threads: " << finished;
      }
    });
  }

  using Clock = std::chrono::high_resolution_clock;
  using RecordProfile = torch::autograd::profiler::RecordProfile;
  using TimePoint = std::chrono::time_point<Clock>;
  TimePoint start_time;

  std::unique_ptr<RecordProfile> profiler_guard;
  {
    // Wait for every caller to finish warm-up before starting the clock.
    std::unique_lock<std::mutex> lock(m);
    while (initialized != config.num_calling_threads) {
      worker_main_cv.wait(lock);
    }
    if (!config.profiler_output_path.empty()) {
      LOG(INFO) << "Using Autograd profiler. Trace will be saved to "
                << config.profiler_output_path;
      profiler_guard =
          std::make_unique<RecordProfile>(config.profiler_output_path);
    }
    LOG(INFO) << "Starting threads";
    start = true;
    start_time = Clock::now();
  }

  main_worker_cv.notify_all();
  {
    std::unique_lock<std::mutex> lock(m);
    worker_main_cv.wait(
        lock, [&]() { return finished == config.num_calling_threads; });
  }
  auto end_time = std::chrono::high_resolution_clock::now();
  profiler_guard.reset();
  LOG(INFO) << "Finished benchmark";

  BenchmarkExecutionStats stats;
  // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
  float total_time_ms = std::chrono::duration_cast<std::chrono::nanoseconds>(
                            end_time - start_time)
                            .count() /
      1000.0 / 1000.0;
  // We use config.num_iters instead of num_attempted_iters as it is
  // representative of the real work done. The last attempted iteration on
  // each calling thread doesn't represent real work (i.e. running the model).
  stats.latency_avg_ms =
      // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
      total_time_ms * config.num_calling_threads / config.num_iters;
  stats.num_iters = config.num_iters;

  for (auto& t : callers) {
    t.join();
  }
  return stats;
}
|
| 148 |
+
|
| 149 |
+
} // namespace detail
|
| 150 |
+
} // namespace throughput_benchmark
|
| 151 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/utils/torch_dispatch_mode.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/impl/TorchDispatchModeTLS.h>
|
| 4 |
+
|
| 5 |
+
namespace torch {
|
| 6 |
+
namespace torch_dispatch_mode {
|
| 7 |
+
|
| 8 |
+
// RAII guard that temporarily removes one torch-dispatch mode from the
// thread-local mode stack and restores it on destruction. When any
// non-infra mode is active, the top of the stack is stashed; otherwise the
// highest-priority infra mode (together with its mode key) is stashed so it
// can be re-registered in the correct slot.
struct StashTorchDispatchModeGuard {
 public:
  StashTorchDispatchModeGuard() {
    if (c10::impl::TorchDispatchModeTLS::any_modes_set(
            /*skip_infra_modes=*/true)) {
      saved_mode_ = c10::impl::TorchDispatchModeTLS::pop_stack();
    } else {
      auto mode_and_key =
          c10::impl::TorchDispatchModeTLS::pop_highest_infra_mode();
      saved_mode_ = std::move(std::get<0>(mode_and_key));
      saved_mode_key_ = std::get<1>(mode_and_key);
    }
  }

  ~StashTorchDispatchModeGuard() {
    if (saved_mode_key_ != c10::nullopt) {
      // Infra mode: restore via its key.
      c10::impl::TorchDispatchModeTLS::set_mode(
          saved_mode_, saved_mode_key_.value());
    } else {
      // User mode: push back onto the stack.
      c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
          std::move(saved_mode_));
    }
  }

  // Access the stashed mode while the guard is alive.
  const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
    return saved_mode_;
  }

 private:
  // NOTE(review): declared as at::SafePyObject while the getter returns
  // c10::SafePyObject -- presumably the same type via an alias; confirm.
  std::shared_ptr<at::SafePyObject> saved_mode_;
  c10::optional<c10::impl::TorchDispatchModeKey> saved_mode_key_;
};
|
| 40 |
+
|
| 41 |
+
// RAII guard that swaps the entire torch-dispatch TLS state out on
// construction (installing a default-constructed state) and swaps the
// previous state back in on destruction.
struct StashTorchDispatchStackGuard {
 public:
  StashTorchDispatchStackGuard() {
    // Exchange the live TLS state with our (initially empty) saved state.
    auto old = c10::impl::TorchDispatchModeTLS::get_state();
    c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
    saved_state_ = std::move(old);
  }

  ~StashTorchDispatchStackGuard() {
    c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
  }

 private:
  c10::impl::TorchDispatchModeTLS saved_state_;
};
|
| 56 |
+
|
| 57 |
+
} // namespace torch_dispatch_mode
|
| 58 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/dns/_features.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
import importlib.metadata
|
| 4 |
+
import itertools
|
| 5 |
+
import string
|
| 6 |
+
from typing import Dict, List, Tuple
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _tuple_from_text(version: str) -> Tuple:
|
| 10 |
+
text_parts = version.split(".")
|
| 11 |
+
int_parts = []
|
| 12 |
+
for text_part in text_parts:
|
| 13 |
+
digit_prefix = "".join(
|
| 14 |
+
itertools.takewhile(lambda x: x in string.digits, text_part)
|
| 15 |
+
)
|
| 16 |
+
try:
|
| 17 |
+
int_parts.append(int(digit_prefix))
|
| 18 |
+
except Exception:
|
| 19 |
+
break
|
| 20 |
+
return tuple(int_parts)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _version_check(
|
| 24 |
+
requirement: str,
|
| 25 |
+
) -> bool:
|
| 26 |
+
"""Is the requirement fulfilled?
|
| 27 |
+
|
| 28 |
+
The requirement must be of the form
|
| 29 |
+
|
| 30 |
+
package>=version
|
| 31 |
+
"""
|
| 32 |
+
package, minimum = requirement.split(">=")
|
| 33 |
+
try:
|
| 34 |
+
version = importlib.metadata.version(package)
|
| 35 |
+
# This shouldn't happen, but it apparently can.
|
| 36 |
+
if version is None:
|
| 37 |
+
return False
|
| 38 |
+
except Exception:
|
| 39 |
+
return False
|
| 40 |
+
t_version = _tuple_from_text(version)
|
| 41 |
+
t_minimum = _tuple_from_text(minimum)
|
| 42 |
+
if t_version < t_minimum:
|
| 43 |
+
return False
|
| 44 |
+
return True
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Memoized feature-availability results.
_cache: Dict[str, bool] = {}


def have(feature: str) -> bool:
    """Is *feature* available?

    This tests if all optional packages needed for the
    feature are available and recent enough.

    Returns ``True`` if the feature is available,
    and ``False`` if it is not or if metadata is
    missing.
    """
    cached = _cache.get(feature)
    if cached is not None:
        return cached
    requirements = _requirements.get(feature)
    if requirements is None:
        # Unknown feature: cache the negative answer for consistency,
        # not performance.
        _cache[feature] = False
        return False
    ok = all(_version_check(requirement) for requirement in requirements)
    _cache[feature] = ok
    return ok


def force(feature: str, enabled: bool) -> None:
    """Force the status of *feature* to be *enabled*.

    This method is provided as a workaround for any cases
    where importlib.metadata is ineffective, or for testing.
    """
    _cache[feature] = enabled


_requirements: Dict[str, List[str]] = {
    ### BEGIN generated requirements
    "dnssec": ["cryptography>=43"],
    "doh": ["httpcore>=1.0.0", "httpx>=0.26.0", "h2>=4.1.0"],
    "doq": ["aioquic>=1.0.0"],
    "idna": ["idna>=3.7"],
    "trio": ["trio>=0.23"],
    "wmi": ["wmi>=1.5.1"],
    ### END generated requirements
}
|
vllm/lib/python3.10/site-packages/dns/_trio_backend.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
"""trio async I/O library query support"""
|
| 4 |
+
|
| 5 |
+
import socket
|
| 6 |
+
|
| 7 |
+
import trio
|
| 8 |
+
import trio.socket # type: ignore
|
| 9 |
+
|
| 10 |
+
import dns._asyncbackend
|
| 11 |
+
import dns._features
|
| 12 |
+
import dns.exception
|
| 13 |
+
import dns.inet
|
| 14 |
+
|
| 15 |
+
if not dns._features.have("trio"):
|
| 16 |
+
raise ImportError("trio not found or too old")
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _maybe_timeout(timeout):
|
| 20 |
+
if timeout is not None:
|
| 21 |
+
return trio.move_on_after(timeout)
|
| 22 |
+
else:
|
| 23 |
+
return dns._asyncbackend.NullContext()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# for brevity
|
| 27 |
+
_lltuple = dns.inet.low_level_address_tuple
|
| 28 |
+
|
| 29 |
+
# pylint: disable=redefined-outer-name
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class DatagramSocket(dns._asyncbackend.DatagramSocket):
|
| 33 |
+
def __init__(self, sock):
|
| 34 |
+
super().__init__(sock.family, socket.SOCK_DGRAM)
|
| 35 |
+
self.socket = sock
|
| 36 |
+
|
| 37 |
+
async def sendto(self, what, destination, timeout):
|
| 38 |
+
with _maybe_timeout(timeout):
|
| 39 |
+
if destination is None:
|
| 40 |
+
return await self.socket.send(what)
|
| 41 |
+
else:
|
| 42 |
+
return await self.socket.sendto(what, destination)
|
| 43 |
+
raise dns.exception.Timeout(
|
| 44 |
+
timeout=timeout
|
| 45 |
+
) # pragma: no cover lgtm[py/unreachable-statement]
|
| 46 |
+
|
| 47 |
+
async def recvfrom(self, size, timeout):
|
| 48 |
+
with _maybe_timeout(timeout):
|
| 49 |
+
return await self.socket.recvfrom(size)
|
| 50 |
+
raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
|
| 51 |
+
|
| 52 |
+
async def close(self):
|
| 53 |
+
self.socket.close()
|
| 54 |
+
|
| 55 |
+
async def getpeername(self):
|
| 56 |
+
return self.socket.getpeername()
|
| 57 |
+
|
| 58 |
+
async def getsockname(self):
|
| 59 |
+
return self.socket.getsockname()
|
| 60 |
+
|
| 61 |
+
async def getpeercert(self, timeout):
|
| 62 |
+
raise NotImplementedError
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class StreamSocket(dns._asyncbackend.StreamSocket):
|
| 66 |
+
def __init__(self, family, stream, tls=False):
|
| 67 |
+
super().__init__(family, socket.SOCK_STREAM)
|
| 68 |
+
self.stream = stream
|
| 69 |
+
self.tls = tls
|
| 70 |
+
|
| 71 |
+
async def sendall(self, what, timeout):
|
| 72 |
+
with _maybe_timeout(timeout):
|
| 73 |
+
return await self.stream.send_all(what)
|
| 74 |
+
raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
|
| 75 |
+
|
| 76 |
+
async def recv(self, size, timeout):
|
| 77 |
+
with _maybe_timeout(timeout):
|
| 78 |
+
return await self.stream.receive_some(size)
|
| 79 |
+
raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
|
| 80 |
+
|
| 81 |
+
async def close(self):
|
| 82 |
+
await self.stream.aclose()
|
| 83 |
+
|
| 84 |
+
async def getpeername(self):
|
| 85 |
+
if self.tls:
|
| 86 |
+
return self.stream.transport_stream.socket.getpeername()
|
| 87 |
+
else:
|
| 88 |
+
return self.stream.socket.getpeername()
|
| 89 |
+
|
| 90 |
+
async def getsockname(self):
|
| 91 |
+
if self.tls:
|
| 92 |
+
return self.stream.transport_stream.socket.getsockname()
|
| 93 |
+
else:
|
| 94 |
+
return self.stream.socket.getsockname()
|
| 95 |
+
|
| 96 |
+
async def getpeercert(self, timeout):
|
| 97 |
+
if self.tls:
|
| 98 |
+
with _maybe_timeout(timeout):
|
| 99 |
+
await self.stream.do_handshake()
|
| 100 |
+
return self.stream.getpeercert()
|
| 101 |
+
else:
|
| 102 |
+
raise NotImplementedError
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
if dns._features.have("doh"):
|
| 106 |
+
import httpcore
|
| 107 |
+
import httpcore._backends.trio
|
| 108 |
+
import httpx
|
| 109 |
+
|
| 110 |
+
_CoreAsyncNetworkBackend = httpcore.AsyncNetworkBackend
|
| 111 |
+
_CoreTrioStream = httpcore._backends.trio.TrioStream
|
| 112 |
+
|
| 113 |
+
from dns.query import _compute_times, _expiration_for_this_attempt, _remaining
|
| 114 |
+
|
| 115 |
+
class _NetworkBackend(_CoreAsyncNetworkBackend):
|
| 116 |
+
def __init__(self, resolver, local_port, bootstrap_address, family):
|
| 117 |
+
super().__init__()
|
| 118 |
+
self._local_port = local_port
|
| 119 |
+
self._resolver = resolver
|
| 120 |
+
self._bootstrap_address = bootstrap_address
|
| 121 |
+
self._family = family
|
| 122 |
+
|
| 123 |
+
async def connect_tcp(
|
| 124 |
+
self, host, port, timeout, local_address, socket_options=None
|
| 125 |
+
): # pylint: disable=signature-differs
|
| 126 |
+
addresses = []
|
| 127 |
+
_, expiration = _compute_times(timeout)
|
| 128 |
+
if dns.inet.is_address(host):
|
| 129 |
+
addresses.append(host)
|
| 130 |
+
elif self._bootstrap_address is not None:
|
| 131 |
+
addresses.append(self._bootstrap_address)
|
| 132 |
+
else:
|
| 133 |
+
timeout = _remaining(expiration)
|
| 134 |
+
family = self._family
|
| 135 |
+
if local_address:
|
| 136 |
+
family = dns.inet.af_for_address(local_address)
|
| 137 |
+
answers = await self._resolver.resolve_name(
|
| 138 |
+
host, family=family, lifetime=timeout
|
| 139 |
+
)
|
| 140 |
+
addresses = answers.addresses()
|
| 141 |
+
for address in addresses:
|
| 142 |
+
try:
|
| 143 |
+
af = dns.inet.af_for_address(address)
|
| 144 |
+
if local_address is not None or self._local_port != 0:
|
| 145 |
+
source = (local_address, self._local_port)
|
| 146 |
+
else:
|
| 147 |
+
source = None
|
| 148 |
+
destination = (address, port)
|
| 149 |
+
attempt_expiration = _expiration_for_this_attempt(2.0, expiration)
|
| 150 |
+
timeout = _remaining(attempt_expiration)
|
| 151 |
+
sock = await Backend().make_socket(
|
| 152 |
+
af, socket.SOCK_STREAM, 0, source, destination, timeout
|
| 153 |
+
)
|
| 154 |
+
return _CoreTrioStream(sock.stream)
|
| 155 |
+
except Exception:
|
| 156 |
+
continue
|
| 157 |
+
raise httpcore.ConnectError
|
| 158 |
+
|
| 159 |
+
async def connect_unix_socket(
|
| 160 |
+
self, path, timeout, socket_options=None
|
| 161 |
+
): # pylint: disable=signature-differs
|
| 162 |
+
raise NotImplementedError
|
| 163 |
+
|
| 164 |
+
async def sleep(self, seconds): # pylint: disable=signature-differs
|
| 165 |
+
await trio.sleep(seconds)
|
| 166 |
+
|
| 167 |
+
class _HTTPTransport(httpx.AsyncHTTPTransport):
|
| 168 |
+
def __init__(
|
| 169 |
+
self,
|
| 170 |
+
*args,
|
| 171 |
+
local_port=0,
|
| 172 |
+
bootstrap_address=None,
|
| 173 |
+
resolver=None,
|
| 174 |
+
family=socket.AF_UNSPEC,
|
| 175 |
+
**kwargs,
|
| 176 |
+
):
|
| 177 |
+
if resolver is None and bootstrap_address is None:
|
| 178 |
+
# pylint: disable=import-outside-toplevel,redefined-outer-name
|
| 179 |
+
import dns.asyncresolver
|
| 180 |
+
|
| 181 |
+
resolver = dns.asyncresolver.Resolver()
|
| 182 |
+
super().__init__(*args, **kwargs)
|
| 183 |
+
self._pool._network_backend = _NetworkBackend(
|
| 184 |
+
resolver, local_port, bootstrap_address, family
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
else:
|
| 188 |
+
_HTTPTransport = dns._asyncbackend.NullTransport # type: ignore
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class Backend(dns._asyncbackend.Backend):
|
| 192 |
+
def name(self):
|
| 193 |
+
return "trio"
|
| 194 |
+
|
| 195 |
+
async def make_socket(
|
| 196 |
+
self,
|
| 197 |
+
af,
|
| 198 |
+
socktype,
|
| 199 |
+
proto=0,
|
| 200 |
+
source=None,
|
| 201 |
+
destination=None,
|
| 202 |
+
timeout=None,
|
| 203 |
+
ssl_context=None,
|
| 204 |
+
server_hostname=None,
|
| 205 |
+
):
|
| 206 |
+
s = trio.socket.socket(af, socktype, proto)
|
| 207 |
+
stream = None
|
| 208 |
+
try:
|
| 209 |
+
if source:
|
| 210 |
+
await s.bind(_lltuple(source, af))
|
| 211 |
+
if socktype == socket.SOCK_STREAM or destination is not None:
|
| 212 |
+
connected = False
|
| 213 |
+
with _maybe_timeout(timeout):
|
| 214 |
+
await s.connect(_lltuple(destination, af))
|
| 215 |
+
connected = True
|
| 216 |
+
if not connected:
|
| 217 |
+
raise dns.exception.Timeout(
|
| 218 |
+
timeout=timeout
|
| 219 |
+
) # lgtm[py/unreachable-statement]
|
| 220 |
+
except Exception: # pragma: no cover
|
| 221 |
+
s.close()
|
| 222 |
+
raise
|
| 223 |
+
if socktype == socket.SOCK_DGRAM:
|
| 224 |
+
return DatagramSocket(s)
|
| 225 |
+
elif socktype == socket.SOCK_STREAM:
|
| 226 |
+
stream = trio.SocketStream(s)
|
| 227 |
+
tls = False
|
| 228 |
+
if ssl_context:
|
| 229 |
+
tls = True
|
| 230 |
+
try:
|
| 231 |
+
stream = trio.SSLStream(
|
| 232 |
+
stream, ssl_context, server_hostname=server_hostname
|
| 233 |
+
)
|
| 234 |
+
except Exception: # pragma: no cover
|
| 235 |
+
await stream.aclose()
|
| 236 |
+
raise
|
| 237 |
+
return StreamSocket(af, stream, tls)
|
| 238 |
+
raise NotImplementedError(
|
| 239 |
+
"unsupported socket " + f"type {socktype}"
|
| 240 |
+
) # pragma: no cover
|
| 241 |
+
|
| 242 |
+
async def sleep(self, interval):
|
| 243 |
+
await trio.sleep(interval)
|
| 244 |
+
|
| 245 |
+
def get_transport_class(self):
|
| 246 |
+
return _HTTPTransport
|
| 247 |
+
|
| 248 |
+
async def wait_for(self, awaitable, timeout):
|
| 249 |
+
with _maybe_timeout(timeout):
|
| 250 |
+
return await awaitable
|
| 251 |
+
raise dns.exception.Timeout(
|
| 252 |
+
timeout=timeout
|
| 253 |
+
) # pragma: no cover lgtm[py/unreachable-statement]
|
vllm/lib/python3.10/site-packages/dns/asyncbackend.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
from typing import Dict
|
| 4 |
+
|
| 5 |
+
import dns.exception
|
| 6 |
+
|
| 7 |
+
# pylint: disable=unused-import
|
| 8 |
+
from dns._asyncbackend import ( # noqa: F401 lgtm[py/unused-import]
|
| 9 |
+
Backend,
|
| 10 |
+
DatagramSocket,
|
| 11 |
+
Socket,
|
| 12 |
+
StreamSocket,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
# pylint: enable=unused-import
|
| 16 |
+
|
| 17 |
+
_default_backend = None
|
| 18 |
+
|
| 19 |
+
_backends: Dict[str, Backend] = {}
|
| 20 |
+
|
| 21 |
+
# Allow sniffio import to be disabled for testing purposes
|
| 22 |
+
_no_sniffio = False
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class AsyncLibraryNotFoundError(dns.exception.DNSException):
|
| 26 |
+
pass
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def get_backend(name: str) -> Backend:
|
| 30 |
+
"""Get the specified asynchronous backend.
|
| 31 |
+
|
| 32 |
+
*name*, a ``str``, the name of the backend. Currently the "trio"
|
| 33 |
+
and "asyncio" backends are available.
|
| 34 |
+
|
| 35 |
+
Raises NotImplementedError if an unknown backend name is specified.
|
| 36 |
+
"""
|
| 37 |
+
# pylint: disable=import-outside-toplevel,redefined-outer-name
|
| 38 |
+
backend = _backends.get(name)
|
| 39 |
+
if backend:
|
| 40 |
+
return backend
|
| 41 |
+
if name == "trio":
|
| 42 |
+
import dns._trio_backend
|
| 43 |
+
|
| 44 |
+
backend = dns._trio_backend.Backend()
|
| 45 |
+
elif name == "asyncio":
|
| 46 |
+
import dns._asyncio_backend
|
| 47 |
+
|
| 48 |
+
backend = dns._asyncio_backend.Backend()
|
| 49 |
+
else:
|
| 50 |
+
raise NotImplementedError(f"unimplemented async backend {name}")
|
| 51 |
+
_backends[name] = backend
|
| 52 |
+
return backend
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def sniff() -> str:
|
| 56 |
+
"""Attempt to determine the in-use asynchronous I/O library by using
|
| 57 |
+
the ``sniffio`` module if it is available.
|
| 58 |
+
|
| 59 |
+
Returns the name of the library, or raises AsyncLibraryNotFoundError
|
| 60 |
+
if the library cannot be determined.
|
| 61 |
+
"""
|
| 62 |
+
# pylint: disable=import-outside-toplevel
|
| 63 |
+
try:
|
| 64 |
+
if _no_sniffio:
|
| 65 |
+
raise ImportError
|
| 66 |
+
import sniffio
|
| 67 |
+
|
| 68 |
+
try:
|
| 69 |
+
return sniffio.current_async_library()
|
| 70 |
+
except sniffio.AsyncLibraryNotFoundError:
|
| 71 |
+
raise AsyncLibraryNotFoundError("sniffio cannot determine async library")
|
| 72 |
+
except ImportError:
|
| 73 |
+
import asyncio
|
| 74 |
+
|
| 75 |
+
try:
|
| 76 |
+
asyncio.get_running_loop()
|
| 77 |
+
return "asyncio"
|
| 78 |
+
except RuntimeError:
|
| 79 |
+
raise AsyncLibraryNotFoundError("no async library detected")
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def get_default_backend() -> Backend:
|
| 83 |
+
"""Get the default backend, initializing it if necessary."""
|
| 84 |
+
if _default_backend:
|
| 85 |
+
return _default_backend
|
| 86 |
+
|
| 87 |
+
return set_default_backend(sniff())
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def set_default_backend(name: str) -> Backend:
|
| 91 |
+
"""Set the default backend.
|
| 92 |
+
|
| 93 |
+
It's not normally necessary to call this method, as
|
| 94 |
+
``get_default_backend()`` will initialize the backend
|
| 95 |
+
appropriately in many cases. If ``sniffio`` is not installed, or
|
| 96 |
+
in testing situations, this function allows the backend to be set
|
| 97 |
+
explicitly.
|
| 98 |
+
"""
|
| 99 |
+
global _default_backend
|
| 100 |
+
_default_backend = get_backend(name)
|
| 101 |
+
return _default_backend
|
vllm/lib/python3.10/site-packages/dns/asyncquery.py
ADDED
|
@@ -0,0 +1,913 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2003-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
"""Talk to a DNS server."""
|
| 19 |
+
|
| 20 |
+
import base64
|
| 21 |
+
import contextlib
|
| 22 |
+
import random
|
| 23 |
+
import socket
|
| 24 |
+
import struct
|
| 25 |
+
import time
|
| 26 |
+
import urllib.parse
|
| 27 |
+
from typing import Any, Dict, Optional, Tuple, Union, cast
|
| 28 |
+
|
| 29 |
+
import dns.asyncbackend
|
| 30 |
+
import dns.exception
|
| 31 |
+
import dns.inet
|
| 32 |
+
import dns.message
|
| 33 |
+
import dns.name
|
| 34 |
+
import dns.quic
|
| 35 |
+
import dns.rcode
|
| 36 |
+
import dns.rdataclass
|
| 37 |
+
import dns.rdatatype
|
| 38 |
+
import dns.transaction
|
| 39 |
+
from dns._asyncbackend import NullContext
|
| 40 |
+
from dns.query import (
|
| 41 |
+
BadResponse,
|
| 42 |
+
HTTPVersion,
|
| 43 |
+
NoDOH,
|
| 44 |
+
NoDOQ,
|
| 45 |
+
UDPMode,
|
| 46 |
+
_check_status,
|
| 47 |
+
_compute_times,
|
| 48 |
+
_make_dot_ssl_context,
|
| 49 |
+
_matches_destination,
|
| 50 |
+
_remaining,
|
| 51 |
+
have_doh,
|
| 52 |
+
ssl,
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
if have_doh:
|
| 56 |
+
import httpx
|
| 57 |
+
|
| 58 |
+
# for brevity
|
| 59 |
+
_lltuple = dns.inet.low_level_address_tuple
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _source_tuple(af, address, port):
|
| 63 |
+
# Make a high level source tuple, or return None if address and port
|
| 64 |
+
# are both None
|
| 65 |
+
if address or port:
|
| 66 |
+
if address is None:
|
| 67 |
+
if af == socket.AF_INET:
|
| 68 |
+
address = "0.0.0.0"
|
| 69 |
+
elif af == socket.AF_INET6:
|
| 70 |
+
address = "::"
|
| 71 |
+
else:
|
| 72 |
+
raise NotImplementedError(f"unknown address family {af}")
|
| 73 |
+
return (address, port)
|
| 74 |
+
else:
|
| 75 |
+
return None
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _timeout(expiration, now=None):
|
| 79 |
+
if expiration is not None:
|
| 80 |
+
if not now:
|
| 81 |
+
now = time.time()
|
| 82 |
+
return max(expiration - now, 0)
|
| 83 |
+
else:
|
| 84 |
+
return None
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
async def send_udp(
|
| 88 |
+
sock: dns.asyncbackend.DatagramSocket,
|
| 89 |
+
what: Union[dns.message.Message, bytes],
|
| 90 |
+
destination: Any,
|
| 91 |
+
expiration: Optional[float] = None,
|
| 92 |
+
) -> Tuple[int, float]:
|
| 93 |
+
"""Send a DNS message to the specified UDP socket.
|
| 94 |
+
|
| 95 |
+
*sock*, a ``dns.asyncbackend.DatagramSocket``.
|
| 96 |
+
|
| 97 |
+
*what*, a ``bytes`` or ``dns.message.Message``, the message to send.
|
| 98 |
+
|
| 99 |
+
*destination*, a destination tuple appropriate for the address family
|
| 100 |
+
of the socket, specifying where to send the query.
|
| 101 |
+
|
| 102 |
+
*expiration*, a ``float`` or ``None``, the absolute time at which
|
| 103 |
+
a timeout exception should be raised. If ``None``, no timeout will
|
| 104 |
+
occur. The expiration value is meaningless for the asyncio backend, as
|
| 105 |
+
asyncio's transport sendto() never blocks.
|
| 106 |
+
|
| 107 |
+
Returns an ``(int, float)`` tuple of bytes sent and the sent time.
|
| 108 |
+
"""
|
| 109 |
+
|
| 110 |
+
if isinstance(what, dns.message.Message):
|
| 111 |
+
what = what.to_wire()
|
| 112 |
+
sent_time = time.time()
|
| 113 |
+
n = await sock.sendto(what, destination, _timeout(expiration, sent_time))
|
| 114 |
+
return (n, sent_time)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
async def receive_udp(
|
| 118 |
+
sock: dns.asyncbackend.DatagramSocket,
|
| 119 |
+
destination: Optional[Any] = None,
|
| 120 |
+
expiration: Optional[float] = None,
|
| 121 |
+
ignore_unexpected: bool = False,
|
| 122 |
+
one_rr_per_rrset: bool = False,
|
| 123 |
+
keyring: Optional[Dict[dns.name.Name, dns.tsig.Key]] = None,
|
| 124 |
+
request_mac: Optional[bytes] = b"",
|
| 125 |
+
ignore_trailing: bool = False,
|
| 126 |
+
raise_on_truncation: bool = False,
|
| 127 |
+
ignore_errors: bool = False,
|
| 128 |
+
query: Optional[dns.message.Message] = None,
|
| 129 |
+
) -> Any:
|
| 130 |
+
"""Read a DNS message from a UDP socket.
|
| 131 |
+
|
| 132 |
+
*sock*, a ``dns.asyncbackend.DatagramSocket``.
|
| 133 |
+
|
| 134 |
+
See :py:func:`dns.query.receive_udp()` for the documentation of the other
|
| 135 |
+
parameters, and exceptions.
|
| 136 |
+
|
| 137 |
+
Returns a ``(dns.message.Message, float, tuple)`` tuple of the received message, the
|
| 138 |
+
received time, and the address where the message arrived from.
|
| 139 |
+
"""
|
| 140 |
+
|
| 141 |
+
wire = b""
|
| 142 |
+
while True:
|
| 143 |
+
(wire, from_address) = await sock.recvfrom(65535, _timeout(expiration))
|
| 144 |
+
if not _matches_destination(
|
| 145 |
+
sock.family, from_address, destination, ignore_unexpected
|
| 146 |
+
):
|
| 147 |
+
continue
|
| 148 |
+
received_time = time.time()
|
| 149 |
+
try:
|
| 150 |
+
r = dns.message.from_wire(
|
| 151 |
+
wire,
|
| 152 |
+
keyring=keyring,
|
| 153 |
+
request_mac=request_mac,
|
| 154 |
+
one_rr_per_rrset=one_rr_per_rrset,
|
| 155 |
+
ignore_trailing=ignore_trailing,
|
| 156 |
+
raise_on_truncation=raise_on_truncation,
|
| 157 |
+
)
|
| 158 |
+
except dns.message.Truncated as e:
|
| 159 |
+
# See the comment in query.py for details.
|
| 160 |
+
if (
|
| 161 |
+
ignore_errors
|
| 162 |
+
and query is not None
|
| 163 |
+
and not query.is_response(e.message())
|
| 164 |
+
):
|
| 165 |
+
continue
|
| 166 |
+
else:
|
| 167 |
+
raise
|
| 168 |
+
except Exception:
|
| 169 |
+
if ignore_errors:
|
| 170 |
+
continue
|
| 171 |
+
else:
|
| 172 |
+
raise
|
| 173 |
+
if ignore_errors and query is not None and not query.is_response(r):
|
| 174 |
+
continue
|
| 175 |
+
return (r, received_time, from_address)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
async def udp(
|
| 179 |
+
q: dns.message.Message,
|
| 180 |
+
where: str,
|
| 181 |
+
timeout: Optional[float] = None,
|
| 182 |
+
port: int = 53,
|
| 183 |
+
source: Optional[str] = None,
|
| 184 |
+
source_port: int = 0,
|
| 185 |
+
ignore_unexpected: bool = False,
|
| 186 |
+
one_rr_per_rrset: bool = False,
|
| 187 |
+
ignore_trailing: bool = False,
|
| 188 |
+
raise_on_truncation: bool = False,
|
| 189 |
+
sock: Optional[dns.asyncbackend.DatagramSocket] = None,
|
| 190 |
+
backend: Optional[dns.asyncbackend.Backend] = None,
|
| 191 |
+
ignore_errors: bool = False,
|
| 192 |
+
) -> dns.message.Message:
|
| 193 |
+
"""Return the response obtained after sending a query via UDP.
|
| 194 |
+
|
| 195 |
+
*sock*, a ``dns.asyncbackend.DatagramSocket``, or ``None``,
|
| 196 |
+
the socket to use for the query. If ``None``, the default, a
|
| 197 |
+
socket is created. Note that if a socket is provided, the
|
| 198 |
+
*source*, *source_port*, and *backend* are ignored.
|
| 199 |
+
|
| 200 |
+
*backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
|
| 201 |
+
the default, then dnspython will use the default backend.
|
| 202 |
+
|
| 203 |
+
See :py:func:`dns.query.udp()` for the documentation of the other
|
| 204 |
+
parameters, exceptions, and return type of this method.
|
| 205 |
+
"""
|
| 206 |
+
wire = q.to_wire()
|
| 207 |
+
(begin_time, expiration) = _compute_times(timeout)
|
| 208 |
+
af = dns.inet.af_for_address(where)
|
| 209 |
+
destination = _lltuple((where, port), af)
|
| 210 |
+
if sock:
|
| 211 |
+
cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
|
| 212 |
+
else:
|
| 213 |
+
if not backend:
|
| 214 |
+
backend = dns.asyncbackend.get_default_backend()
|
| 215 |
+
stuple = _source_tuple(af, source, source_port)
|
| 216 |
+
if backend.datagram_connection_required():
|
| 217 |
+
dtuple = (where, port)
|
| 218 |
+
else:
|
| 219 |
+
dtuple = None
|
| 220 |
+
cm = await backend.make_socket(af, socket.SOCK_DGRAM, 0, stuple, dtuple)
|
| 221 |
+
async with cm as s:
|
| 222 |
+
await send_udp(s, wire, destination, expiration)
|
| 223 |
+
(r, received_time, _) = await receive_udp(
|
| 224 |
+
s,
|
| 225 |
+
destination,
|
| 226 |
+
expiration,
|
| 227 |
+
ignore_unexpected,
|
| 228 |
+
one_rr_per_rrset,
|
| 229 |
+
q.keyring,
|
| 230 |
+
q.mac,
|
| 231 |
+
ignore_trailing,
|
| 232 |
+
raise_on_truncation,
|
| 233 |
+
ignore_errors,
|
| 234 |
+
q,
|
| 235 |
+
)
|
| 236 |
+
r.time = received_time - begin_time
|
| 237 |
+
# We don't need to check q.is_response() if we are in ignore_errors mode
|
| 238 |
+
# as receive_udp() will have checked it.
|
| 239 |
+
if not (ignore_errors or q.is_response(r)):
|
| 240 |
+
raise BadResponse
|
| 241 |
+
return r
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
async def udp_with_fallback(
|
| 245 |
+
q: dns.message.Message,
|
| 246 |
+
where: str,
|
| 247 |
+
timeout: Optional[float] = None,
|
| 248 |
+
port: int = 53,
|
| 249 |
+
source: Optional[str] = None,
|
| 250 |
+
source_port: int = 0,
|
| 251 |
+
ignore_unexpected: bool = False,
|
| 252 |
+
one_rr_per_rrset: bool = False,
|
| 253 |
+
ignore_trailing: bool = False,
|
| 254 |
+
udp_sock: Optional[dns.asyncbackend.DatagramSocket] = None,
|
| 255 |
+
tcp_sock: Optional[dns.asyncbackend.StreamSocket] = None,
|
| 256 |
+
backend: Optional[dns.asyncbackend.Backend] = None,
|
| 257 |
+
ignore_errors: bool = False,
|
| 258 |
+
) -> Tuple[dns.message.Message, bool]:
|
| 259 |
+
"""Return the response to the query, trying UDP first and falling back
|
| 260 |
+
to TCP if UDP results in a truncated response.
|
| 261 |
+
|
| 262 |
+
*udp_sock*, a ``dns.asyncbackend.DatagramSocket``, or ``None``,
|
| 263 |
+
the socket to use for the UDP query. If ``None``, the default, a
|
| 264 |
+
socket is created. Note that if a socket is provided the *source*,
|
| 265 |
+
*source_port*, and *backend* are ignored for the UDP query.
|
| 266 |
+
|
| 267 |
+
*tcp_sock*, a ``dns.asyncbackend.StreamSocket``, or ``None``, the
|
| 268 |
+
socket to use for the TCP query. If ``None``, the default, a
|
| 269 |
+
socket is created. Note that if a socket is provided *where*,
|
| 270 |
+
*source*, *source_port*, and *backend* are ignored for the TCP query.
|
| 271 |
+
|
| 272 |
+
*backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
|
| 273 |
+
the default, then dnspython will use the default backend.
|
| 274 |
+
|
| 275 |
+
See :py:func:`dns.query.udp_with_fallback()` for the documentation
|
| 276 |
+
of the other parameters, exceptions, and return type of this
|
| 277 |
+
method.
|
| 278 |
+
"""
|
| 279 |
+
try:
|
| 280 |
+
response = await udp(
|
| 281 |
+
q,
|
| 282 |
+
where,
|
| 283 |
+
timeout,
|
| 284 |
+
port,
|
| 285 |
+
source,
|
| 286 |
+
source_port,
|
| 287 |
+
ignore_unexpected,
|
| 288 |
+
one_rr_per_rrset,
|
| 289 |
+
ignore_trailing,
|
| 290 |
+
True,
|
| 291 |
+
udp_sock,
|
| 292 |
+
backend,
|
| 293 |
+
ignore_errors,
|
| 294 |
+
)
|
| 295 |
+
return (response, False)
|
| 296 |
+
except dns.message.Truncated:
|
| 297 |
+
response = await tcp(
|
| 298 |
+
q,
|
| 299 |
+
where,
|
| 300 |
+
timeout,
|
| 301 |
+
port,
|
| 302 |
+
source,
|
| 303 |
+
source_port,
|
| 304 |
+
one_rr_per_rrset,
|
| 305 |
+
ignore_trailing,
|
| 306 |
+
tcp_sock,
|
| 307 |
+
backend,
|
| 308 |
+
)
|
| 309 |
+
return (response, True)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
async def send_tcp(
|
| 313 |
+
sock: dns.asyncbackend.StreamSocket,
|
| 314 |
+
what: Union[dns.message.Message, bytes],
|
| 315 |
+
expiration: Optional[float] = None,
|
| 316 |
+
) -> Tuple[int, float]:
|
| 317 |
+
"""Send a DNS message to the specified TCP socket.
|
| 318 |
+
|
| 319 |
+
*sock*, a ``dns.asyncbackend.StreamSocket``.
|
| 320 |
+
|
| 321 |
+
See :py:func:`dns.query.send_tcp()` for the documentation of the other
|
| 322 |
+
parameters, exceptions, and return type of this method.
|
| 323 |
+
"""
|
| 324 |
+
|
| 325 |
+
if isinstance(what, dns.message.Message):
|
| 326 |
+
tcpmsg = what.to_wire(prepend_length=True)
|
| 327 |
+
else:
|
| 328 |
+
# copying the wire into tcpmsg is inefficient, but lets us
|
| 329 |
+
# avoid writev() or doing a short write that would get pushed
|
| 330 |
+
# onto the net
|
| 331 |
+
tcpmsg = len(what).to_bytes(2, "big") + what
|
| 332 |
+
sent_time = time.time()
|
| 333 |
+
await sock.sendall(tcpmsg, _timeout(expiration, sent_time))
|
| 334 |
+
return (len(tcpmsg), sent_time)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
async def _read_exactly(sock, count, expiration):
    """Read exactly *count* bytes from the stream *sock*.

    Keeps issuing reads until the requested amount has been received,
    raising ``EOFError`` if the peer closes the connection first.
    """
    chunks = []
    remaining = count
    while remaining > 0:
        piece = await sock.recv(remaining, _timeout(expiration))
        if piece == b"":
            raise EOFError("EOF")
        remaining -= len(piece)
        chunks.append(piece)
    return b"".join(chunks)
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
async def receive_tcp(
    sock: dns.asyncbackend.StreamSocket,
    expiration: Optional[float] = None,
    one_rr_per_rrset: bool = False,
    keyring: Optional[Dict[dns.name.Name, dns.tsig.Key]] = None,
    request_mac: Optional[bytes] = b"",
    ignore_trailing: bool = False,
) -> Tuple[dns.message.Message, float]:
    """Read a DNS message from a TCP socket.

    *sock*, a ``dns.asyncbackend.StreamSocket``.

    See :py:func:`dns.query.receive_tcp()` for the documentation of the other
    parameters, exceptions, and return type of this method.
    """

    # TCP DNS messages carry a two-byte big-endian length prefix; read it
    # first, then read exactly that many bytes of message.
    ldata = await _read_exactly(sock, 2, expiration)
    (l,) = struct.unpack("!H", ldata)
    wire = await _read_exactly(sock, l, expiration)
    # Timestamp taken immediately after the full message arrives, before
    # parsing, so parse time is not charged to the query.
    received_time = time.time()
    r = dns.message.from_wire(
        wire,
        keyring=keyring,
        request_mac=request_mac,
        one_rr_per_rrset=one_rr_per_rrset,
        ignore_trailing=ignore_trailing,
    )
    return (r, received_time)
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
async def tcp(
    q: dns.message.Message,
    where: str,
    timeout: Optional[float] = None,
    port: int = 53,
    source: Optional[str] = None,
    source_port: int = 0,
    one_rr_per_rrset: bool = False,
    ignore_trailing: bool = False,
    sock: Optional[dns.asyncbackend.StreamSocket] = None,
    backend: Optional[dns.asyncbackend.Backend] = None,
) -> dns.message.Message:
    """Return the response obtained after sending a query via TCP.

    *sock*, a ``dns.asyncbackend.StreamSocket``, or ``None``, the
    socket to use for the query.  If ``None``, the default, a socket
    is created.  Note that if a socket is provided
    *where*, *port*, *source*, *source_port*, and *backend* are ignored.

    *backend*, a ``dns.asyncbackend.Backend``, or ``None``.  If ``None``,
    the default, then dnspython will use the default backend.

    See :py:func:`dns.query.tcp()` for the documentation of the other
    parameters, exceptions, and return type of this method.
    """

    wire = q.to_wire()
    (begin_time, expiration) = _compute_times(timeout)
    if sock:
        # Verify that the socket is connected, as if it's not connected,
        # it's not writable, and the polling in send_tcp() will time out or
        # hang forever.
        await sock.getpeername()
        cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
    else:
        # These are simple (address, port) pairs, not family-dependent tuples
        # you pass to low-level socket code.
        af = dns.inet.af_for_address(where)
        stuple = _source_tuple(af, source, source_port)
        dtuple = (where, port)
        if not backend:
            backend = dns.asyncbackend.get_default_backend()
        cm = await backend.make_socket(
            af, socket.SOCK_STREAM, 0, stuple, dtuple, timeout
        )
    async with cm as s:
        await send_tcp(s, wire, expiration)
        (r, received_time) = await receive_tcp(
            s, expiration, one_rr_per_rrset, q.keyring, q.mac, ignore_trailing
        )
        # Elapsed time spans from just before the send to message receipt.
        r.time = received_time - begin_time
        if not q.is_response(r):
            raise BadResponse
        return r
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
async def tls(
    q: dns.message.Message,
    where: str,
    timeout: Optional[float] = None,
    port: int = 853,
    source: Optional[str] = None,
    source_port: int = 0,
    one_rr_per_rrset: bool = False,
    ignore_trailing: bool = False,
    sock: Optional[dns.asyncbackend.StreamSocket] = None,
    backend: Optional[dns.asyncbackend.Backend] = None,
    ssl_context: Optional[ssl.SSLContext] = None,
    server_hostname: Optional[str] = None,
    verify: Union[bool, str] = True,
) -> dns.message.Message:
    """Return the response obtained after sending a query via TLS.

    *sock*, an ``asyncbackend.StreamSocket``, or ``None``, the socket
    to use for the query.  If ``None``, the default, a socket is
    created.  Note that if a socket is provided, it must be a
    connected SSL stream socket, and *where*, *port*,
    *source*, *source_port*, *backend*, *ssl_context*, and *server_hostname*
    are ignored.

    *backend*, a ``dns.asyncbackend.Backend``, or ``None``.  If ``None``,
    the default, then dnspython will use the default backend.

    See :py:func:`dns.query.tls()` for the documentation of the other
    parameters, exceptions, and return type of this method.
    """
    (begin_time, expiration) = _compute_times(timeout)
    if sock:
        # Caller-supplied socket: assume it is already a connected TLS stream.
        cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
    else:
        if ssl_context is None:
            ssl_context = _make_dot_ssl_context(server_hostname, verify)
        af = dns.inet.af_for_address(where)
        stuple = _source_tuple(af, source, source_port)
        dtuple = (where, port)
        if not backend:
            backend = dns.asyncbackend.get_default_backend()
        cm = await backend.make_socket(
            af,
            socket.SOCK_STREAM,
            0,
            stuple,
            dtuple,
            timeout,
            ssl_context,
            server_hostname,
        )
    async with cm as s:
        # Recompute the per-attempt timeout from the overall deadline, then
        # delegate the actual query exchange to tcp() over the TLS stream.
        timeout = _timeout(expiration)
        response = await tcp(
            q,
            where,
            timeout,
            port,
            source,
            source_port,
            one_rr_per_rrset,
            ignore_trailing,
            s,
            backend,
        )
        end_time = time.time()
        # Overwrite the time recorded by tcp() so it includes TLS setup.
        response.time = end_time - begin_time
        return response
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def _maybe_get_resolver(
|
| 508 |
+
resolver: Optional["dns.asyncresolver.Resolver"],
|
| 509 |
+
) -> "dns.asyncresolver.Resolver":
|
| 510 |
+
# We need a separate method for this to avoid overriding the global
|
| 511 |
+
# variable "dns" with the as-yet undefined local variable "dns"
|
| 512 |
+
# in https().
|
| 513 |
+
if resolver is None:
|
| 514 |
+
# pylint: disable=import-outside-toplevel,redefined-outer-name
|
| 515 |
+
import dns.asyncresolver
|
| 516 |
+
|
| 517 |
+
resolver = dns.asyncresolver.Resolver()
|
| 518 |
+
return resolver
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
async def https(
    q: dns.message.Message,
    where: str,
    timeout: Optional[float] = None,
    port: int = 443,
    source: Optional[str] = None,
    source_port: int = 0,  # pylint: disable=W0613
    one_rr_per_rrset: bool = False,
    ignore_trailing: bool = False,
    client: Optional["httpx.AsyncClient"] = None,
    path: str = "/dns-query",
    post: bool = True,
    verify: Union[bool, str] = True,
    bootstrap_address: Optional[str] = None,
    resolver: Optional["dns.asyncresolver.Resolver"] = None,
    family: int = socket.AF_UNSPEC,
    http_version: HTTPVersion = HTTPVersion.DEFAULT,
) -> dns.message.Message:
    """Return the response obtained after sending a query via DNS-over-HTTPS.

    *client*, a ``httpx.AsyncClient``.  If provided, the client to use for
    the query.

    Unlike the other dnspython async functions, a backend cannot be provided
    in this function because httpx always auto-detects the async backend.

    See :py:func:`dns.query.https()` for the documentation of the other
    parameters, exceptions, and return type of this method.
    """

    # *where* may be a literal address or a full URL; build the URL in the
    # former case (IPv6 literals need brackets).
    try:
        af = dns.inet.af_for_address(where)
    except ValueError:
        af = None
    if af is not None and dns.inet.is_address(where):
        if af == socket.AF_INET:
            url = f"https://{where}:{port}{path}"
        elif af == socket.AF_INET6:
            url = f"https://[{where}]:{port}{path}"
    else:
        url = where

    extensions = {}
    if bootstrap_address is None:
        # pylint: disable=possibly-used-before-assignment
        parsed = urllib.parse.urlparse(url)
        if parsed.hostname is None:
            raise ValueError("no hostname in URL")
        if dns.inet.is_address(parsed.hostname):
            # URL host is already an address: connect to it directly, but
            # still send SNI for the hostname form.
            bootstrap_address = parsed.hostname
            extensions["sni_hostname"] = parsed.hostname
        if parsed.port is not None:
            port = parsed.port

    # HTTP/3 path: explicit request, or default when the DoH (httpx)
    # dependencies are unavailable.
    if http_version == HTTPVersion.H3 or (
        http_version == HTTPVersion.DEFAULT and not have_doh
    ):
        if bootstrap_address is None:
            resolver = _maybe_get_resolver(resolver)
            assert parsed.hostname is not None  # for mypy
            answers = await resolver.resolve_name(parsed.hostname, family)
            bootstrap_address = random.choice(list(answers.addresses()))
        return await _http3(
            q,
            bootstrap_address,
            url,
            timeout,
            port,
            source,
            source_port,
            one_rr_per_rrset,
            ignore_trailing,
            verify=verify,
            post=post,
        )

    if not have_doh:
        raise NoDOH  # pragma: no cover
    # pylint: disable=possibly-used-before-assignment
    if client and not isinstance(client, httpx.AsyncClient):
        raise ValueError("session parameter must be an httpx.AsyncClient")
    # pylint: enable=possibly-used-before-assignment

    wire = q.to_wire()
    headers = {"accept": "application/dns-message"}

    h1 = http_version in (HTTPVersion.H1, HTTPVersion.DEFAULT)
    h2 = http_version in (HTTPVersion.H2, HTTPVersion.DEFAULT)

    backend = dns.asyncbackend.get_default_backend()

    if source is None:
        local_address = None
        local_port = 0
    else:
        local_address = source
        local_port = source_port

    if client:
        # Reuse the caller's client; NullContext keeps it open on exit.
        cm: contextlib.AbstractAsyncContextManager = NullContext(client)
    else:
        transport = backend.get_transport_class()(
            local_address=local_address,
            http1=h1,
            http2=h2,
            verify=verify,
            local_port=local_port,
            bootstrap_address=bootstrap_address,
            resolver=resolver,
            family=family,
        )

        cm = httpx.AsyncClient(http1=h1, http2=h2, verify=verify, transport=transport)

    async with cm as the_client:
        # see https://tools.ietf.org/html/rfc8484#section-4.1.1 for DoH
        # GET and POST examples
        if post:
            headers.update(
                {
                    "content-type": "application/dns-message",
                    "content-length": str(len(wire)),
                }
            )
            response = await backend.wait_for(
                the_client.post(
                    url,
                    headers=headers,
                    content=wire,
                    extensions=extensions,
                ),
                timeout,
            )
        else:
            # GET form: wire is base64url-encoded with padding stripped.
            wire = base64.urlsafe_b64encode(wire).rstrip(b"=")
            twire = wire.decode()  # httpx does a repr() if we give it bytes
            response = await backend.wait_for(
                the_client.get(
                    url,
                    headers=headers,
                    params={"dns": twire},
                    extensions=extensions,
                ),
                timeout,
            )

        # see https://tools.ietf.org/html/rfc8484#section-4.2.1 for info about DoH
        # status codes
        if response.status_code < 200 or response.status_code > 299:
            raise ValueError(
                f"{where} responded with status code {response.status_code}"
                f"\nResponse body: {response.content!r}"
            )
        r = dns.message.from_wire(
            response.content,
            keyring=q.keyring,
            request_mac=q.request_mac,
            one_rr_per_rrset=one_rr_per_rrset,
            ignore_trailing=ignore_trailing,
        )
        r.time = response.elapsed.total_seconds()
        if not q.is_response(r):
            raise BadResponse
        return r
|
| 685 |
+
|
| 686 |
+
|
| 687 |
+
async def _http3(
    q: dns.message.Message,
    where: str,
    url: str,
    timeout: Optional[float] = None,
    port: int = 853,
    source: Optional[str] = None,
    source_port: int = 0,
    one_rr_per_rrset: bool = False,
    ignore_trailing: bool = False,
    verify: Union[bool, str] = True,
    backend: Optional[dns.asyncbackend.Backend] = None,
    hostname: Optional[str] = None,
    post: bool = True,
) -> dns.message.Message:
    # DNS-over-HTTP3 helper used by https(); *where* is the (already
    # bootstrapped) address to connect to, *url* carries the server name.
    if not dns.quic.have_quic:
        raise NoDOH("DNS-over-HTTP3 is not available.")  # pragma: no cover

    url_parts = urllib.parse.urlparse(url)
    # NOTE(review): the *hostname* parameter is unconditionally overwritten
    # from the URL here, so the caller's value is effectively ignored.
    hostname = url_parts.hostname
    if url_parts.port is not None:
        port = url_parts.port

    # The message id is always zero in DoH (RFC 8484 section 4.1).
    q.id = 0
    wire = q.to_wire()
    (cfactory, mfactory) = dns.quic.factories_for_backend(backend)

    async with cfactory() as context:
        async with mfactory(
            context, verify_mode=verify, server_name=hostname, h3=True
        ) as the_manager:
            the_connection = the_manager.connect(where, port, source, source_port)
            (start, expiration) = _compute_times(timeout)
            stream = await the_connection.make_stream(timeout)
            async with stream:
                # note that send_h3() does not need await
                stream.send_h3(url, wire, post)
                wire = await stream.receive(_remaining(expiration))
                _check_status(stream.headers(), where, wire)
            finish = time.time()
    r = dns.message.from_wire(
        wire,
        keyring=q.keyring,
        request_mac=q.request_mac,
        one_rr_per_rrset=one_rr_per_rrset,
        ignore_trailing=ignore_trailing,
    )
    # Clamp to zero in case of clock adjustment between start and finish.
    r.time = max(finish - start, 0.0)
    if not q.is_response(r):
        raise BadResponse
    return r
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
async def quic(
    q: dns.message.Message,
    where: str,
    timeout: Optional[float] = None,
    port: int = 853,
    source: Optional[str] = None,
    source_port: int = 0,
    one_rr_per_rrset: bool = False,
    ignore_trailing: bool = False,
    connection: Optional[dns.quic.AsyncQuicConnection] = None,
    verify: Union[bool, str] = True,
    backend: Optional[dns.asyncbackend.Backend] = None,
    hostname: Optional[str] = None,
    server_hostname: Optional[str] = None,
) -> dns.message.Message:
    """Return the response obtained after sending an asynchronous query via
    DNS-over-QUIC.

    *backend*, a ``dns.asyncbackend.Backend``, or ``None``.  If ``None``,
    the default, then dnspython will use the default backend.

    See :py:func:`dns.query.quic()` for the documentation of the other
    parameters, exceptions, and return type of this method.
    """

    if not dns.quic.have_quic:
        raise NoDOQ("DNS-over-QUIC is not available.")  # pragma: no cover

    # *server_hostname* is the preferred spelling; fall back to it when the
    # legacy *hostname* parameter was not given.
    if server_hostname is not None and hostname is None:
        hostname = server_hostname

    # The message id must be zero in DoQ (RFC 9250 section 4.2.1).
    q.id = 0
    wire = q.to_wire()
    the_connection: dns.quic.AsyncQuicConnection
    if connection:
        # Reusing an existing connection: null factories make the context
        # managers below no-ops.
        cfactory = dns.quic.null_factory
        mfactory = dns.quic.null_factory
        the_connection = connection
    else:
        (cfactory, mfactory) = dns.quic.factories_for_backend(backend)

    async with cfactory() as context:
        async with mfactory(
            context,
            verify_mode=verify,
            server_name=server_hostname,
        ) as the_manager:
            if not connection:
                the_connection = the_manager.connect(where, port, source, source_port)
            (start, expiration) = _compute_times(timeout)
            stream = await the_connection.make_stream(timeout)
            async with stream:
                await stream.send(wire, True)
                wire = await stream.receive(_remaining(expiration))
            finish = time.time()
    r = dns.message.from_wire(
        wire,
        keyring=q.keyring,
        request_mac=q.request_mac,
        one_rr_per_rrset=one_rr_per_rrset,
        ignore_trailing=ignore_trailing,
    )
    # Clamp to zero in case of clock adjustment between start and finish.
    r.time = max(finish - start, 0.0)
    if not q.is_response(r):
        raise BadResponse
    return r
|
| 806 |
+
|
| 807 |
+
|
| 808 |
+
async def _inbound_xfr(
    txn_manager: dns.transaction.TransactionManager,
    s: dns.asyncbackend.Socket,
    query: dns.message.Message,
    serial: Optional[int],
    timeout: Optional[float],
    expiration: float,
) -> Any:
    """Given a socket, does the zone transfer."""
    # Async generator: yields each received message so callers can observe
    # transfer progress; the Inbound object applies the records.
    rdtype = query.question[0].rdtype
    is_ixfr = rdtype == dns.rdatatype.IXFR
    origin = txn_manager.from_wire_origin()
    wire = query.to_wire()
    is_udp = s.type == socket.SOCK_DGRAM
    if is_udp:
        udp_sock = cast(dns.asyncbackend.DatagramSocket, s)
        await udp_sock.sendto(wire, None, _timeout(expiration))
    else:
        tcp_sock = cast(dns.asyncbackend.StreamSocket, s)
        tcpmsg = struct.pack("!H", len(wire)) + wire
        await tcp_sock.sendall(tcpmsg, expiration)
    with dns.xfr.Inbound(txn_manager, rdtype, serial, is_udp) as inbound:
        done = False
        tsig_ctx = None
        while not done:
            # Each message gets its own timeout, capped by the overall
            # transfer expiration.
            (_, mexpiration) = _compute_times(timeout)
            if mexpiration is None or (
                expiration is not None and mexpiration > expiration
            ):
                mexpiration = expiration
            if is_udp:
                timeout = _timeout(mexpiration)
                (rwire, _) = await udp_sock.recvfrom(65535, timeout)
            else:
                ldata = await _read_exactly(tcp_sock, 2, mexpiration)
                (l,) = struct.unpack("!H", ldata)
                rwire = await _read_exactly(tcp_sock, l, mexpiration)
            r = dns.message.from_wire(
                rwire,
                keyring=query.keyring,
                request_mac=query.mac,
                xfr=True,
                origin=origin,
                tsig_ctx=tsig_ctx,
                multi=(not is_udp),
                one_rr_per_rrset=is_ixfr,
            )
            done = inbound.process_message(r)
            yield r
            # Thread the TSIG verification context through the multi-message
            # transfer.
            tsig_ctx = r.tsig_ctx
    # The loop always runs at least once, so ``r`` is bound here; a signed
    # transfer must end with a TSIG on the final message.
    if query.keyring and not r.had_tsig:
        raise dns.exception.FormError("missing TSIG")
|
| 860 |
+
|
| 861 |
+
|
| 862 |
+
async def inbound_xfr(
    where: str,
    txn_manager: dns.transaction.TransactionManager,
    query: Optional[dns.message.Message] = None,
    port: int = 53,
    timeout: Optional[float] = None,
    lifetime: Optional[float] = None,
    source: Optional[str] = None,
    source_port: int = 0,
    udp_mode: UDPMode = UDPMode.NEVER,
    backend: Optional[dns.asyncbackend.Backend] = None,
) -> None:
    """Conduct an inbound transfer and apply it via a transaction from the
    txn_manager.

    *backend*, a ``dns.asyncbackend.Backend``, or ``None``.  If ``None``,
    the default, then dnspython will use the default backend.

    See :py:func:`dns.query.inbound_xfr()` for the documentation of
    the other parameters, exceptions, and return type of this method.
    """
    if query is None:
        (query, serial) = dns.xfr.make_query(txn_manager)
    else:
        serial = dns.xfr.extract_serial_from_query(query)
    af = dns.inet.af_for_address(where)
    stuple = _source_tuple(af, source, source_port)
    dtuple = (where, port)
    if not backend:
        backend = dns.asyncbackend.get_default_backend()
    (_, expiration) = _compute_times(lifetime)
    # IXFR may be attempted over UDP first (if allowed); a truncated/refused
    # attempt signals dns.xfr.UseTCP and we fall through to TCP below.
    if query.question[0].rdtype == dns.rdatatype.IXFR and udp_mode != UDPMode.NEVER:
        s = await backend.make_socket(
            af, socket.SOCK_DGRAM, 0, stuple, dtuple, _timeout(expiration)
        )
        async with s:
            try:
                async for _ in _inbound_xfr(
                    txn_manager, s, query, serial, timeout, expiration
                ):
                    pass
                return
            except dns.xfr.UseTCP:
                if udp_mode == UDPMode.ONLY:
                    raise

    s = await backend.make_socket(
        af, socket.SOCK_STREAM, 0, stuple, dtuple, _timeout(expiration)
    )
    async with s:
        # Drain the generator; the transaction manager applies the records.
        async for _ in _inbound_xfr(txn_manager, s, query, serial, timeout, expiration):
            pass
|
vllm/lib/python3.10/site-packages/dns/enum.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2003-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
import enum
|
| 19 |
+
from typing import Type, TypeVar, Union
|
| 20 |
+
|
| 21 |
+
TIntEnum = TypeVar("TIntEnum", bound="IntEnum")


class IntEnum(enum.IntEnum):
    """An ``enum.IntEnum`` with text conversion and range checking.

    Subclasses define their members plus ``_maximum()`` (and optionally the
    other ``_``-prefixed hooks) to get ``from_text()``, ``to_text()`` and
    ``make()`` with support for unknown-but-valid numeric values.
    """

    @classmethod
    def _missing_(cls, value):
        # enum hook: synthesize a pseudo-member for in-range values that
        # have no declared name, so e.g. TYPE12345 round-trips.
        cls._check_value(value)
        instance = int.__new__(cls, value)
        instance._name_ = cls._extra_to_text(value, None) or f"{cls._prefix()}{value}"
        instance._value_ = value
        return instance

    @classmethod
    def _check_value(cls, value):
        """Raise TypeError for non-ints and ValueError for out-of-range ints."""
        upper = cls._maximum()
        if not isinstance(value, int):
            raise TypeError
        if not 0 <= value <= upper:
            raise ValueError(
                f"{cls._short_name()} must be an int between >= 0 and <= {upper}"
            )

    @classmethod
    def from_text(cls: Type[TIntEnum], text: str) -> TIntEnum:
        """Convert *text* (case-insensitive) into a member or plain int."""
        label = text.upper()
        if label in cls.__members__:
            return cls[label]
        extra = cls._extra_from_text(label)
        if extra:
            return extra
        prefix = cls._prefix()
        digits = label[len(prefix) :]
        if label.startswith(prefix) and digits.isdigit():
            number = int(digits)
            cls._check_value(number)
            # Fall back to the bare int if the enum refuses the value.
            try:
                return cls(number)
            except ValueError:
                return number
        raise cls._unknown_exception_class()

    @classmethod
    def to_text(cls: Type[TIntEnum], value: int) -> str:
        """Convert *value* into its textual form."""
        cls._check_value(value)
        try:
            known = cls(value).name
        except ValueError:
            known = None
        result = cls._extra_to_text(value, known)
        return result if result is not None else f"{cls._prefix()}{value}"

    @classmethod
    def make(cls: Type[TIntEnum], value: Union[int, str]) -> TIntEnum:
        """Convert text or a value into an enumerated type, if possible.

        *value*, the ``int`` or ``str`` to convert.

        Raises a class-specific exception if a ``str`` is provided that
        cannot be converted.

        Raises ``ValueError`` if the value is out of range.

        Returns an enumeration from the calling class corresponding to the
        value, if one is defined, or an ``int`` otherwise.
        """
        if isinstance(value, str):
            return cls.from_text(value)
        cls._check_value(value)
        return cls(value)

    # --- hooks for subclasses -------------------------------------------

    @classmethod
    def _maximum(cls):
        # Subclasses must supply the inclusive upper bound.
        raise NotImplementedError  # pragma: no cover

    @classmethod
    def _short_name(cls):
        return cls.__name__.lower()

    @classmethod
    def _prefix(cls):
        return ""

    @classmethod
    def _extra_from_text(cls, text):  # pylint: disable=W0613
        return None

    @classmethod
    def _extra_to_text(cls, value, current_text):  # pylint: disable=W0613
        return current_text

    @classmethod
    def _unknown_exception_class(cls):
        return ValueError
|
vllm/lib/python3.10/site-packages/dns/exception.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2003-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
"""Common DNS Exceptions.
|
| 19 |
+
|
| 20 |
+
Dnspython modules may also define their own exceptions, which will
|
| 21 |
+
always be subclasses of ``DNSException``.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
from typing import Optional, Set
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class DNSException(Exception):
    """Abstract base class shared by all dnspython exceptions.

    It supports two basic modes of operation:

    a) Old/compatible mode is used if ``__init__`` was called with
    empty *kwargs*.  In compatible mode all *args* are passed
    to the standard Python Exception class as before and all *args* are
    printed by the standard ``__str__`` implementation.  Class variable
    ``msg`` (or doc string if ``msg`` is ``None``) is returned from ``str()``
    if *args* is empty.

    b) New/parametrized mode is used if ``__init__`` was called with
    non-empty *kwargs*.
    In the new mode *args* must be empty and all kwargs must match
    those set in class variable ``supp_kwargs``. All kwargs are stored inside
    ``self.kwargs`` and used in a new ``__str__`` implementation to construct
    a formatted message based on the ``fmt`` class variable, a ``string``.

    In the simplest case it is enough to override the ``supp_kwargs``
    and ``fmt`` class variables to get nice parametrized messages.
    """

    msg: Optional[str] = None  # non-parametrized message
    supp_kwargs: Set[str] = set()  # accepted parameters for _fmt_kwargs (sanity check)
    fmt: Optional[str] = None  # message parametrized with results from _fmt_kwargs

    def __init__(self, *args, **kwargs):
        # Reject any mix of positional and keyword arguments up front.
        self._check_params(*args, **kwargs)
        if kwargs:
            # New/parametrized mode: validate the kwargs against
            # supp_kwargs, then freeze the formatted message into self.msg.
            # This call to a virtual method from __init__ is ok in our usage
            self.kwargs = self._check_kwargs(**kwargs)  # lgtm[py/init-calls-subclass]
            self.msg = str(self)
        else:
            self.kwargs = dict()  # defined but empty for old mode exceptions
        if self.msg is None:
            # doc string is better implicit message than empty string
            self.msg = self.__doc__
        if args:
            super().__init__(*args)
        else:
            super().__init__(self.msg)

    def _check_params(self, *args, **kwargs):
        """Old exceptions supported only args and not kwargs.

        For sanity we do not allow to mix old and new behavior."""
        if args or kwargs:
            assert bool(args) != bool(
                kwargs
            ), "keyword arguments are mutually exclusive with positional args"

    def _check_kwargs(self, **kwargs):
        # Subclasses declare the exact accepted keyword set in supp_kwargs;
        # passing anything else is a programming error, hence the assert.
        if kwargs:
            assert (
                set(kwargs.keys()) == self.supp_kwargs
            ), f"following set of keyword args is required: {self.supp_kwargs}"
        return kwargs

    def _fmt_kwargs(self, **kwargs):
        """Format kwargs before printing them.

        Resulting dictionary has to have keys necessary for str.format call
        on fmt class variable.
        """
        fmtargs = {}
        for kw, data in kwargs.items():
            if isinstance(data, (list, set)):
                # convert list of <someobj> to list of str(<someobj>)
                fmtargs[kw] = list(map(str, data))
                if len(fmtargs[kw]) == 1:
                    # remove list brackets [] from single-item lists
                    fmtargs[kw] = fmtargs[kw].pop()
            else:
                fmtargs[kw] = data
        return fmtargs

    def __str__(self):
        if self.kwargs and self.fmt:
            # provide custom message constructed from keyword arguments
            fmtargs = self._fmt_kwargs(**self.kwargs)
            return self.fmt.format(**fmtargs)
        else:
            # print *args directly in the same way as old DNSException
            return super().__str__()
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Raised for wire-format problems (per the docstring: a malformed message).
class FormError(DNSException):
    """DNS message is malformed."""
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# NOTE: intentionally shadows the built-in SyntaxError within this module;
# callers reference it as dns.exception.SyntaxError.
class SyntaxError(DNSException):
    """Text input is malformed."""
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
# Specialization of (module-level) SyntaxError for truncated text input.
class UnexpectedEnd(SyntaxError):
    """Text input ended unexpectedly."""
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
# Raised when a DNS message exceeds the permitted size.
class TooBig(DNSException):
    """The DNS message is too big."""
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class Timeout(DNSException):
    """The DNS operation timed out."""

    # Parametrized-mode configuration: Timeout(timeout=1.5) formats via fmt.
    supp_kwargs = {"timeout"}
    fmt = "The DNS operation timed out after {timeout:.3f} seconds"

    # We do this as otherwise mypy complains about unexpected keyword argument
    # "timeout"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# DNSSEC-related: the requested algorithm is not available.
class UnsupportedAlgorithm(DNSException):
    """The DNSSEC algorithm is not supported."""
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# More specific form of UnsupportedAlgorithm: algorithm/key-type mismatch.
class AlgorithmKeyMismatch(UnsupportedAlgorithm):
    """The DNSSEC algorithm is not supported for the given key type."""
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# DNSSEC signature verification failed.
class ValidationFailure(DNSException):
    """The DNSSEC signature is invalid."""
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
# Operation rejected by a DNSSEC policy rather than by a cryptographic failure.
class DeniedByPolicy(DNSException):
    """Denied by DNSSEC policy."""
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class ExceptionWrapper:
    """Context manager that coerces unexpected exceptions to a single type.

    Any exception raised inside the ``with`` body that is not already an
    instance of *exception_class* is re-raised as *exception_class*, built
    from the original exception's string form and chained to it via
    ``from``.  Exceptions that already match propagate unchanged, and a
    body that raises nothing is a no-op.
    """

    def __init__(self, exception_class):
        # The target exception type used for wrapping.
        self.exception_class = exception_class

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # No exception, or one of the expected type: do not suppress,
        # let normal propagation take place.
        if exc_type is None or isinstance(exc_val, self.exception_class):
            return False
        # Foreign exception: translate it, keeping the original as cause.
        raise self.exception_class(str(exc_val)) from exc_val
|
vllm/lib/python3.10/site-packages/dns/name.py
ADDED
|
@@ -0,0 +1,1284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2001-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
"""DNS Names.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import copy
|
| 22 |
+
import encodings.idna # type: ignore
|
| 23 |
+
import functools
|
| 24 |
+
import struct
|
| 25 |
+
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Union
|
| 26 |
+
|
| 27 |
+
import dns._features
|
| 28 |
+
import dns.enum
|
| 29 |
+
import dns.exception
|
| 30 |
+
import dns.immutable
|
| 31 |
+
import dns.wire
|
| 32 |
+
|
| 33 |
+
# IDNA 2008 support is optional: the third-party "idna" package is imported
# only when dns._features reports it as available.
if dns._features.have("idna"):
    import idna  # type: ignore

    have_idna_2008 = True
else:  # pragma: no cover
    have_idna_2008 = False

# Maps a Name to an int offset.  NOTE(review): presumably the offset at
# which the name was written, for DNS name compression — confirm against
# the to_wire()/dns.wire usage.
CompressType = Dict["Name", int]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class NameRelation(dns.enum.IntEnum):
    """Name relation result from fullcompare()."""

    # This is an IntEnum for backwards compatibility in case anyone
    # has hardwired the constants.

    #: The compared names have no relationship to each other.
    NONE = 0
    #: the first name is a superdomain of the second.
    SUPERDOMAIN = 1
    #: The first name is a subdomain of the second.
    SUBDOMAIN = 2
    #: The compared names are equal.
    EQUAL = 3
    #: The compared names have a common ancestor.
    COMMONANCESTOR = 4

    @classmethod
    def _maximum(cls):
        # Largest defined value; hook consumed by the dns.enum machinery
        # (presumably for range validation — confirm against dns.enum).
        return cls.COMMONANCESTOR  # pragma: no cover

    @classmethod
    def _short_name(cls):
        # Short textual name; hook consumed by the dns.enum machinery.
        return cls.__name__  # pragma: no cover
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
# Backwards compatibility: old module-level aliases for NameRelation values.
NAMERELN_NONE = NameRelation.NONE
NAMERELN_SUPERDOMAIN = NameRelation.SUPERDOMAIN
NAMERELN_SUBDOMAIN = NameRelation.SUBDOMAIN
NAMERELN_EQUAL = NameRelation.EQUAL
NAMERELN_COMMONANCESTOR = NameRelation.COMMONANCESTOR
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# Raised when an empty (root) label appears in a disallowed position.
class EmptyLabel(dns.exception.SyntaxError):
    """A DNS label is empty."""
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Raised when a backslash escape in a textual name cannot be parsed.
class BadEscape(dns.exception.SyntaxError):
    """An escaped code in a text format of DNS name is invalid."""
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# Wire-format error: compression pointers must always point backward.
class BadPointer(dns.exception.FormError):
    """A DNS compression pointer points forward instead of backward."""
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# Wire-format error: unrecognized label-type bits in a name.
class BadLabelType(dns.exception.FormError):
    """The label type in DNS name wire format is unknown."""
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
    """An attempt was made to convert a non-absolute name to
    wire when there was also a non-absolute (or missing) origin."""
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Raised when the total encoded length of a name exceeds 255 octets.
class NameTooLong(dns.exception.FormError):
    """A DNS name is > 255 octets long."""
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# Raised when a single label exceeds the 63-octet limit.
class LabelTooLong(dns.exception.SyntaxError):
    """A DNS label is > 63 octets long."""
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class AbsoluteConcatenation(dns.exception.DNSException):
    """An attempt was made to append anything other than the
    empty name to an absolute DNS name."""
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class NoParent(dns.exception.DNSException):
    """An attempt was made to get the parent of the root name
    or the empty name."""
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# Raised when IDNA 2008 is requested but the optional "idna" package is
# not installed (see the have_idna_2008 flag above).
class NoIDNA2008(dns.exception.DNSException):
    """IDNA 2008 processing was requested but the idna module is not
    available."""
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class IDNAException(dns.exception.DNSException):
    """IDNA processing raised an exception."""

    # Parametrized-mode configuration: the wrapped lower-level exception
    # is passed as IDNAException(idna_exception=e) and rendered via fmt.
    supp_kwargs = {"idna_exception"}
    fmt = "IDNA processing exception: {idna_exception}"

    # We do this as otherwise mypy complains about unexpected keyword argument
    # idna_exception
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class NeedSubdomainOfOrigin(dns.exception.DNSException):
    """An absolute name was provided that is not a subdomain of the specified origin."""
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
_escaped = b'"().;\\@$'
|
| 138 |
+
_escaped_text = '"().;\\@$'
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _escapify(label: Union[bytes, str]) -> str:
|
| 142 |
+
"""Escape the characters in label which need it.
|
| 143 |
+
@returns: the escaped string
|
| 144 |
+
@rtype: string"""
|
| 145 |
+
if isinstance(label, bytes):
|
| 146 |
+
# Ordinary DNS label mode. Escape special characters and values
|
| 147 |
+
# < 0x20 or > 0x7f.
|
| 148 |
+
text = ""
|
| 149 |
+
for c in label:
|
| 150 |
+
if c in _escaped:
|
| 151 |
+
text += "\\" + chr(c)
|
| 152 |
+
elif c > 0x20 and c < 0x7F:
|
| 153 |
+
text += chr(c)
|
| 154 |
+
else:
|
| 155 |
+
text += "\\%03d" % c
|
| 156 |
+
return text
|
| 157 |
+
|
| 158 |
+
# Unicode label mode. Escape only special characters and values < 0x20
|
| 159 |
+
text = ""
|
| 160 |
+
for uc in label:
|
| 161 |
+
if uc in _escaped_text:
|
| 162 |
+
text += "\\" + uc
|
| 163 |
+
elif uc <= "\x20":
|
| 164 |
+
text += "\\%03d" % ord(uc)
|
| 165 |
+
else:
|
| 166 |
+
text += uc
|
| 167 |
+
return text
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class IDNACodec:
    """Abstract base class for IDNA encoder/decoders."""

    def __init__(self):
        pass

    def is_idna(self, label: bytes) -> bool:
        """Does *label* look like an IDNA A-label (``xn--`` prefix)?"""
        return label.lower().startswith(b"xn--")

    def encode(self, label: str) -> bytes:
        # Subclasses must supply an encoding policy.
        raise NotImplementedError  # pragma: no cover

    def decode(self, label: bytes) -> str:
        # We do not apply any IDNA policy on decode.
        if not self.is_idna(label):
            return _escapify(label)
        try:
            return _escapify(label[4:].decode("punycode"))
        except Exception as e:
            # Wrap any punycode failure in our exception type.
            raise IDNAException(idna_exception=e)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class IDNA2003Codec(IDNACodec):
    """IDNA 2003 encoder/decoder."""

    def __init__(self, strict_decode: bool = False):
        """Initialize the IDNA 2003 encoder/decoder.

        *strict_decode* is a ``bool``. If `True`, then IDNA2003 checking
        is done when decoding.  This can cause failures if the name
        was encoded with IDNA2008.  The default is `False`.
        """
        super().__init__()
        self.strict_decode = strict_decode

    def encode(self, label: str) -> bytes:
        """Encode *label* using the stdlib IDNA 2003 implementation."""
        if label == "":
            return b""
        try:
            return encodings.idna.ToASCII(label)
        except UnicodeError:
            # ToASCII signals an over-long label via UnicodeError.
            raise LabelTooLong

    def decode(self, label: bytes) -> str:
        """Decode *label*, applying IDNA 2003 checks only when strict."""
        if not self.strict_decode:
            # Permissive decoding is handled by the base class.
            return super().decode(label)
        if label == b"":
            return ""
        try:
            return _escapify(encodings.idna.ToUnicode(label))
        except Exception as e:
            raise IDNAException(idna_exception=e)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class IDNA2008Codec(IDNACodec):
    """IDNA 2008 encoder/decoder."""

    def __init__(
        self,
        uts_46: bool = False,
        transitional: bool = False,
        allow_pure_ascii: bool = False,
        strict_decode: bool = False,
    ):
        """Initialize the IDNA 2008 encoder/decoder.

        *uts_46* is a ``bool``.  If True, apply Unicode IDNA
        compatibility processing as described in Unicode Technical
        Standard #46 (https://unicode.org/reports/tr46/).
        If False, do not apply the mapping.  The default is False.

        *transitional* is a ``bool``: If True, use the
        "transitional" mode described in Unicode Technical Standard
        #46.  The default is False.

        *allow_pure_ascii* is a ``bool``.  If True, then a label which
        consists of only ASCII characters is allowed.  This is less
        strict than regular IDNA 2008, but is also necessary for mixed
        names, e.g. a name with starting with "_sip._tcp." and ending
        in an IDN suffix which would otherwise be disallowed.  The
        default is False.

        *strict_decode* is a ``bool``: If True, then IDNA2008 checking
        is done when decoding.  This can cause failures if the name
        was encoded with IDNA2003.  The default is False.
        """
        super().__init__()
        self.uts_46 = uts_46
        self.transitional = transitional
        self.allow_pure_ascii = allow_pure_ascii
        self.strict_decode = strict_decode

    def encode(self, label: str) -> bytes:
        if label == "":
            return b""
        if self.allow_pure_ascii and is_all_ascii(label):
            # Pure-ASCII labels bypass the idna package entirely; only the
            # 63-octet label length limit is enforced here.
            encoded = label.encode("ascii")
            if len(encoded) > 63:
                raise LabelTooLong
            return encoded
        if not have_idna_2008:
            raise NoIDNA2008
        try:
            if self.uts_46:
                # pylint: disable=possibly-used-before-assignment
                label = idna.uts46_remap(label, False, self.transitional)
            return idna.alabel(label)
        except idna.IDNAError as e:
            # Translate the idna package's length failure to our exception;
            # NOTE(review): this matches on the package's exact message
            # string, so it is coupled to the idna package's wording.
            if e.args[0] == "Label too long":
                raise LabelTooLong
            else:
                raise IDNAException(idna_exception=e)

    def decode(self, label: bytes) -> str:
        if not self.strict_decode:
            # Permissive decoding is handled by the base class.
            return super().decode(label)
        if label == b"":
            return ""
        if not have_idna_2008:
            raise NoIDNA2008
        try:
            ulabel = idna.ulabel(label)
            if self.uts_46:
                ulabel = idna.uts46_remap(ulabel, False, self.transitional)
            return _escapify(ulabel)
        except (idna.IDNAError, UnicodeError) as e:
            raise IDNAException(idna_exception=e)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
# Pre-built codec instances.  IDNA2008Codec positional arguments are
# (uts_46, transitional, allow_pure_ascii, strict_decode).
IDNA_2003_Practical = IDNA2003Codec(False)
IDNA_2003_Strict = IDNA2003Codec(True)
IDNA_2003 = IDNA_2003_Practical
IDNA_2008_Practical = IDNA2008Codec(True, False, True, False)
IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False)
IDNA_2008_Strict = IDNA2008Codec(False, False, False, True)
IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False)
IDNA_2008 = IDNA_2008_Practical
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _validate_labels(labels: Tuple[bytes, ...]) -> None:
|
| 316 |
+
"""Check for empty labels in the middle of a label sequence,
|
| 317 |
+
labels that are too long, and for too many labels.
|
| 318 |
+
|
| 319 |
+
Raises ``dns.name.NameTooLong`` if the name as a whole is too long.
|
| 320 |
+
|
| 321 |
+
Raises ``dns.name.EmptyLabel`` if a label is empty (i.e. the root
|
| 322 |
+
label) and appears in a position other than the end of the label
|
| 323 |
+
sequence
|
| 324 |
+
|
| 325 |
+
"""
|
| 326 |
+
|
| 327 |
+
l = len(labels)
|
| 328 |
+
total = 0
|
| 329 |
+
i = -1
|
| 330 |
+
j = 0
|
| 331 |
+
for label in labels:
|
| 332 |
+
ll = len(label)
|
| 333 |
+
total += ll + 1
|
| 334 |
+
if ll > 63:
|
| 335 |
+
raise LabelTooLong
|
| 336 |
+
if i < 0 and label == b"":
|
| 337 |
+
i = j
|
| 338 |
+
j += 1
|
| 339 |
+
if total > 255:
|
| 340 |
+
raise NameTooLong
|
| 341 |
+
if i >= 0 and i != l - 1:
|
| 342 |
+
raise EmptyLabel
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def _maybe_convert_to_binary(label: Union[bytes, str]) -> bytes:
|
| 346 |
+
"""If label is ``str``, convert it to ``bytes``. If it is already
|
| 347 |
+
``bytes`` just return it.
|
| 348 |
+
|
| 349 |
+
"""
|
| 350 |
+
|
| 351 |
+
if isinstance(label, bytes):
|
| 352 |
+
return label
|
| 353 |
+
if isinstance(label, str):
|
| 354 |
+
return label.encode()
|
| 355 |
+
raise ValueError # pragma: no cover
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
@dns.immutable.immutable
|
| 359 |
+
class Name:
|
| 360 |
+
"""A DNS name.
|
| 361 |
+
|
| 362 |
+
The dns.name.Name class represents a DNS name as a tuple of
|
| 363 |
+
labels. Each label is a ``bytes`` in DNS wire format. Instances
|
| 364 |
+
of the class are immutable.
|
| 365 |
+
"""
|
| 366 |
+
|
| 367 |
+
__slots__ = ["labels"]
|
| 368 |
+
|
| 369 |
+
def __init__(self, labels: Iterable[Union[bytes, str]]):
|
| 370 |
+
"""*labels* is any iterable whose values are ``str`` or ``bytes``."""
|
| 371 |
+
|
| 372 |
+
blabels = [_maybe_convert_to_binary(x) for x in labels]
|
| 373 |
+
self.labels = tuple(blabels)
|
| 374 |
+
_validate_labels(self.labels)
|
| 375 |
+
|
| 376 |
+
def __copy__(self):
|
| 377 |
+
return Name(self.labels)
|
| 378 |
+
|
| 379 |
+
def __deepcopy__(self, memo):
|
| 380 |
+
return Name(copy.deepcopy(self.labels, memo))
|
| 381 |
+
|
| 382 |
+
def __getstate__(self):
|
| 383 |
+
# Names can be pickled
|
| 384 |
+
return {"labels": self.labels}
|
| 385 |
+
|
| 386 |
+
def __setstate__(self, state):
|
| 387 |
+
super().__setattr__("labels", state["labels"])
|
| 388 |
+
_validate_labels(self.labels)
|
| 389 |
+
|
| 390 |
+
def is_absolute(self) -> bool:
|
| 391 |
+
"""Is the most significant label of this name the root label?
|
| 392 |
+
|
| 393 |
+
Returns a ``bool``.
|
| 394 |
+
"""
|
| 395 |
+
|
| 396 |
+
return len(self.labels) > 0 and self.labels[-1] == b""
|
| 397 |
+
|
| 398 |
+
def is_wild(self) -> bool:
|
| 399 |
+
"""Is this name wild? (I.e. Is the least significant label '*'?)
|
| 400 |
+
|
| 401 |
+
Returns a ``bool``.
|
| 402 |
+
"""
|
| 403 |
+
|
| 404 |
+
return len(self.labels) > 0 and self.labels[0] == b"*"
|
| 405 |
+
|
| 406 |
+
def __hash__(self) -> int:
|
| 407 |
+
"""Return a case-insensitive hash of the name.
|
| 408 |
+
|
| 409 |
+
Returns an ``int``.
|
| 410 |
+
"""
|
| 411 |
+
|
| 412 |
+
h = 0
|
| 413 |
+
for label in self.labels:
|
| 414 |
+
for c in label.lower():
|
| 415 |
+
h += (h << 3) + c
|
| 416 |
+
return h
|
| 417 |
+
|
| 418 |
+
def fullcompare(self, other: "Name") -> Tuple[NameRelation, int, int]:
|
| 419 |
+
"""Compare two names, returning a 3-tuple
|
| 420 |
+
``(relation, order, nlabels)``.
|
| 421 |
+
|
| 422 |
+
*relation* describes the relation ship between the names,
|
| 423 |
+
and is one of: ``dns.name.NameRelation.NONE``,
|
| 424 |
+
``dns.name.NameRelation.SUPERDOMAIN``, ``dns.name.NameRelation.SUBDOMAIN``,
|
| 425 |
+
``dns.name.NameRelation.EQUAL``, or ``dns.name.NameRelation.COMMONANCESTOR``.
|
| 426 |
+
|
| 427 |
+
*order* is < 0 if *self* < *other*, > 0 if *self* > *other*, and ==
|
| 428 |
+
0 if *self* == *other*. A relative name is always less than an
|
| 429 |
+
absolute name. If both names have the same relativity, then
|
| 430 |
+
the DNSSEC order relation is used to order them.
|
| 431 |
+
|
| 432 |
+
*nlabels* is the number of significant labels that the two names
|
| 433 |
+
have in common.
|
| 434 |
+
|
| 435 |
+
Here are some examples. Names ending in "." are absolute names,
|
| 436 |
+
those not ending in "." are relative names.
|
| 437 |
+
|
| 438 |
+
============= ============= =========== ===== =======
|
| 439 |
+
self other relation order nlabels
|
| 440 |
+
============= ============= =========== ===== =======
|
| 441 |
+
www.example. www.example. equal 0 3
|
| 442 |
+
www.example. example. subdomain > 0 2
|
| 443 |
+
example. www.example. superdomain < 0 2
|
| 444 |
+
example1.com. example2.com. common anc. < 0 2
|
| 445 |
+
example1 example2. none < 0 0
|
| 446 |
+
example1. example2 none > 0 0
|
| 447 |
+
============= ============= =========== ===== =======
|
| 448 |
+
"""
|
| 449 |
+
|
| 450 |
+
sabs = self.is_absolute()
|
| 451 |
+
oabs = other.is_absolute()
|
| 452 |
+
if sabs != oabs:
|
| 453 |
+
if sabs:
|
| 454 |
+
return (NameRelation.NONE, 1, 0)
|
| 455 |
+
else:
|
| 456 |
+
return (NameRelation.NONE, -1, 0)
|
| 457 |
+
l1 = len(self.labels)
|
| 458 |
+
l2 = len(other.labels)
|
| 459 |
+
ldiff = l1 - l2
|
| 460 |
+
if ldiff < 0:
|
| 461 |
+
l = l1
|
| 462 |
+
else:
|
| 463 |
+
l = l2
|
| 464 |
+
|
| 465 |
+
order = 0
|
| 466 |
+
nlabels = 0
|
| 467 |
+
namereln = NameRelation.NONE
|
| 468 |
+
while l > 0:
|
| 469 |
+
l -= 1
|
| 470 |
+
l1 -= 1
|
| 471 |
+
l2 -= 1
|
| 472 |
+
label1 = self.labels[l1].lower()
|
| 473 |
+
label2 = other.labels[l2].lower()
|
| 474 |
+
if label1 < label2:
|
| 475 |
+
order = -1
|
| 476 |
+
if nlabels > 0:
|
| 477 |
+
namereln = NameRelation.COMMONANCESTOR
|
| 478 |
+
return (namereln, order, nlabels)
|
| 479 |
+
elif label1 > label2:
|
| 480 |
+
order = 1
|
| 481 |
+
if nlabels > 0:
|
| 482 |
+
namereln = NameRelation.COMMONANCESTOR
|
| 483 |
+
return (namereln, order, nlabels)
|
| 484 |
+
nlabels += 1
|
| 485 |
+
order = ldiff
|
| 486 |
+
if ldiff < 0:
|
| 487 |
+
namereln = NameRelation.SUPERDOMAIN
|
| 488 |
+
elif ldiff > 0:
|
| 489 |
+
namereln = NameRelation.SUBDOMAIN
|
| 490 |
+
else:
|
| 491 |
+
namereln = NameRelation.EQUAL
|
| 492 |
+
return (namereln, order, nlabels)
|
| 493 |
+
|
| 494 |
+
def is_subdomain(self, other: "Name") -> bool:
|
| 495 |
+
"""Is self a subdomain of other?
|
| 496 |
+
|
| 497 |
+
Note that the notion of subdomain includes equality, e.g.
|
| 498 |
+
"dnspython.org" is a subdomain of itself.
|
| 499 |
+
|
| 500 |
+
Returns a ``bool``.
|
| 501 |
+
"""
|
| 502 |
+
|
| 503 |
+
(nr, _, _) = self.fullcompare(other)
|
| 504 |
+
if nr == NameRelation.SUBDOMAIN or nr == NameRelation.EQUAL:
|
| 505 |
+
return True
|
| 506 |
+
return False
|
| 507 |
+
|
| 508 |
+
def is_superdomain(self, other: "Name") -> bool:
|
| 509 |
+
"""Is self a superdomain of other?
|
| 510 |
+
|
| 511 |
+
Note that the notion of superdomain includes equality, e.g.
|
| 512 |
+
"dnspython.org" is a superdomain of itself.
|
| 513 |
+
|
| 514 |
+
Returns a ``bool``.
|
| 515 |
+
"""
|
| 516 |
+
|
| 517 |
+
(nr, _, _) = self.fullcompare(other)
|
| 518 |
+
if nr == NameRelation.SUPERDOMAIN or nr == NameRelation.EQUAL:
|
| 519 |
+
return True
|
| 520 |
+
return False
|
| 521 |
+
|
| 522 |
+
def canonicalize(self) -> "Name":
|
| 523 |
+
"""Return a name which is equal to the current name, but is in
|
| 524 |
+
DNSSEC canonical form.
|
| 525 |
+
"""
|
| 526 |
+
|
| 527 |
+
return Name([x.lower() for x in self.labels])
|
| 528 |
+
|
| 529 |
+
def __eq__(self, other):
|
| 530 |
+
if isinstance(other, Name):
|
| 531 |
+
return self.fullcompare(other)[1] == 0
|
| 532 |
+
else:
|
| 533 |
+
return False
|
| 534 |
+
|
| 535 |
+
def __ne__(self, other):
|
| 536 |
+
if isinstance(other, Name):
|
| 537 |
+
return self.fullcompare(other)[1] != 0
|
| 538 |
+
else:
|
| 539 |
+
return True
|
| 540 |
+
|
| 541 |
+
def __lt__(self, other):
|
| 542 |
+
if isinstance(other, Name):
|
| 543 |
+
return self.fullcompare(other)[1] < 0
|
| 544 |
+
else:
|
| 545 |
+
return NotImplemented
|
| 546 |
+
|
| 547 |
+
def __le__(self, other):
|
| 548 |
+
if isinstance(other, Name):
|
| 549 |
+
return self.fullcompare(other)[1] <= 0
|
| 550 |
+
else:
|
| 551 |
+
return NotImplemented
|
| 552 |
+
|
| 553 |
+
def __ge__(self, other):
|
| 554 |
+
if isinstance(other, Name):
|
| 555 |
+
return self.fullcompare(other)[1] >= 0
|
| 556 |
+
else:
|
| 557 |
+
return NotImplemented
|
| 558 |
+
|
| 559 |
+
def __gt__(self, other):
|
| 560 |
+
if isinstance(other, Name):
|
| 561 |
+
return self.fullcompare(other)[1] > 0
|
| 562 |
+
else:
|
| 563 |
+
return NotImplemented
|
| 564 |
+
|
| 565 |
+
def __repr__(self):
|
| 566 |
+
return "<DNS name " + self.__str__() + ">"
|
| 567 |
+
|
| 568 |
+
def __str__(self):
|
| 569 |
+
return self.to_text(False)
|
| 570 |
+
|
| 571 |
+
def to_text(self, omit_final_dot: bool = False) -> str:
|
| 572 |
+
"""Convert name to DNS text format.
|
| 573 |
+
|
| 574 |
+
*omit_final_dot* is a ``bool``. If True, don't emit the final
|
| 575 |
+
dot (denoting the root label) for absolute names. The default
|
| 576 |
+
is False.
|
| 577 |
+
|
| 578 |
+
Returns a ``str``.
|
| 579 |
+
"""
|
| 580 |
+
|
| 581 |
+
if len(self.labels) == 0:
|
| 582 |
+
return "@"
|
| 583 |
+
if len(self.labels) == 1 and self.labels[0] == b"":
|
| 584 |
+
return "."
|
| 585 |
+
if omit_final_dot and self.is_absolute():
|
| 586 |
+
l = self.labels[:-1]
|
| 587 |
+
else:
|
| 588 |
+
l = self.labels
|
| 589 |
+
s = ".".join(map(_escapify, l))
|
| 590 |
+
return s
|
| 591 |
+
|
| 592 |
+
def to_unicode(
|
| 593 |
+
self, omit_final_dot: bool = False, idna_codec: Optional[IDNACodec] = None
|
| 594 |
+
) -> str:
|
| 595 |
+
"""Convert name to Unicode text format.
|
| 596 |
+
|
| 597 |
+
IDN ACE labels are converted to Unicode.
|
| 598 |
+
|
| 599 |
+
*omit_final_dot* is a ``bool``. If True, don't emit the final
|
| 600 |
+
dot (denoting the root label) for absolute names. The default
|
| 601 |
+
is False.
|
| 602 |
+
*idna_codec* specifies the IDNA encoder/decoder. If None, the
|
| 603 |
+
dns.name.IDNA_2003_Practical encoder/decoder is used.
|
| 604 |
+
The IDNA_2003_Practical decoder does
|
| 605 |
+
not impose any policy, it just decodes punycode, so if you
|
| 606 |
+
don't want checking for compliance, you can use this decoder
|
| 607 |
+
for IDNA2008 as well.
|
| 608 |
+
|
| 609 |
+
Returns a ``str``.
|
| 610 |
+
"""
|
| 611 |
+
|
| 612 |
+
if len(self.labels) == 0:
|
| 613 |
+
return "@"
|
| 614 |
+
if len(self.labels) == 1 and self.labels[0] == b"":
|
| 615 |
+
return "."
|
| 616 |
+
if omit_final_dot and self.is_absolute():
|
| 617 |
+
l = self.labels[:-1]
|
| 618 |
+
else:
|
| 619 |
+
l = self.labels
|
| 620 |
+
if idna_codec is None:
|
| 621 |
+
idna_codec = IDNA_2003_Practical
|
| 622 |
+
return ".".join([idna_codec.decode(x) for x in l])
|
| 623 |
+
|
| 624 |
+
def to_digestable(self, origin: Optional["Name"] = None) -> bytes:
|
| 625 |
+
"""Convert name to a format suitable for digesting in hashes.
|
| 626 |
+
|
| 627 |
+
The name is canonicalized and converted to uncompressed wire
|
| 628 |
+
format. All names in wire format are absolute. If the name
|
| 629 |
+
is a relative name, then an origin must be supplied.
|
| 630 |
+
|
| 631 |
+
*origin* is a ``dns.name.Name`` or ``None``. If the name is
|
| 632 |
+
relative and origin is not ``None``, then origin will be appended
|
| 633 |
+
to the name.
|
| 634 |
+
|
| 635 |
+
Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
|
| 636 |
+
relative and no origin was provided.
|
| 637 |
+
|
| 638 |
+
Returns a ``bytes``.
|
| 639 |
+
"""
|
| 640 |
+
|
| 641 |
+
digest = self.to_wire(origin=origin, canonicalize=True)
|
| 642 |
+
assert digest is not None
|
| 643 |
+
return digest
|
| 644 |
+
|
| 645 |
+
def to_wire(
|
| 646 |
+
self,
|
| 647 |
+
file: Optional[Any] = None,
|
| 648 |
+
compress: Optional[CompressType] = None,
|
| 649 |
+
origin: Optional["Name"] = None,
|
| 650 |
+
canonicalize: bool = False,
|
| 651 |
+
) -> Optional[bytes]:
|
| 652 |
+
"""Convert name to wire format, possibly compressing it.
|
| 653 |
+
|
| 654 |
+
*file* is the file where the name is emitted (typically an
|
| 655 |
+
io.BytesIO file). If ``None`` (the default), a ``bytes``
|
| 656 |
+
containing the wire name will be returned.
|
| 657 |
+
|
| 658 |
+
*compress*, a ``dict``, is the compression table to use. If
|
| 659 |
+
``None`` (the default), names will not be compressed. Note that
|
| 660 |
+
the compression code assumes that compression offset 0 is the
|
| 661 |
+
start of *file*, and thus compression will not be correct
|
| 662 |
+
if this is not the case.
|
| 663 |
+
|
| 664 |
+
*origin* is a ``dns.name.Name`` or ``None``. If the name is
|
| 665 |
+
relative and origin is not ``None``, then *origin* will be appended
|
| 666 |
+
to it.
|
| 667 |
+
|
| 668 |
+
*canonicalize*, a ``bool``, indicates whether the name should
|
| 669 |
+
be canonicalized; that is, converted to a format suitable for
|
| 670 |
+
digesting in hashes.
|
| 671 |
+
|
| 672 |
+
Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
|
| 673 |
+
relative and no origin was provided.
|
| 674 |
+
|
| 675 |
+
Returns a ``bytes`` or ``None``.
|
| 676 |
+
"""
|
| 677 |
+
|
| 678 |
+
if file is None:
|
| 679 |
+
out = bytearray()
|
| 680 |
+
for label in self.labels:
|
| 681 |
+
out.append(len(label))
|
| 682 |
+
if canonicalize:
|
| 683 |
+
out += label.lower()
|
| 684 |
+
else:
|
| 685 |
+
out += label
|
| 686 |
+
if not self.is_absolute():
|
| 687 |
+
if origin is None or not origin.is_absolute():
|
| 688 |
+
raise NeedAbsoluteNameOrOrigin
|
| 689 |
+
for label in origin.labels:
|
| 690 |
+
out.append(len(label))
|
| 691 |
+
if canonicalize:
|
| 692 |
+
out += label.lower()
|
| 693 |
+
else:
|
| 694 |
+
out += label
|
| 695 |
+
return bytes(out)
|
| 696 |
+
|
| 697 |
+
labels: Iterable[bytes]
|
| 698 |
+
if not self.is_absolute():
|
| 699 |
+
if origin is None or not origin.is_absolute():
|
| 700 |
+
raise NeedAbsoluteNameOrOrigin
|
| 701 |
+
labels = list(self.labels)
|
| 702 |
+
labels.extend(list(origin.labels))
|
| 703 |
+
else:
|
| 704 |
+
labels = self.labels
|
| 705 |
+
i = 0
|
| 706 |
+
for label in labels:
|
| 707 |
+
n = Name(labels[i:])
|
| 708 |
+
i += 1
|
| 709 |
+
if compress is not None:
|
| 710 |
+
pos = compress.get(n)
|
| 711 |
+
else:
|
| 712 |
+
pos = None
|
| 713 |
+
if pos is not None:
|
| 714 |
+
value = 0xC000 + pos
|
| 715 |
+
s = struct.pack("!H", value)
|
| 716 |
+
file.write(s)
|
| 717 |
+
break
|
| 718 |
+
else:
|
| 719 |
+
if compress is not None and len(n) > 1:
|
| 720 |
+
pos = file.tell()
|
| 721 |
+
if pos <= 0x3FFF:
|
| 722 |
+
compress[n] = pos
|
| 723 |
+
l = len(label)
|
| 724 |
+
file.write(struct.pack("!B", l))
|
| 725 |
+
if l > 0:
|
| 726 |
+
if canonicalize:
|
| 727 |
+
file.write(label.lower())
|
| 728 |
+
else:
|
| 729 |
+
file.write(label)
|
| 730 |
+
return None
|
| 731 |
+
|
| 732 |
+
def __len__(self) -> int:
|
| 733 |
+
"""The length of the name (in labels).
|
| 734 |
+
|
| 735 |
+
Returns an ``int``.
|
| 736 |
+
"""
|
| 737 |
+
|
| 738 |
+
return len(self.labels)
|
| 739 |
+
|
| 740 |
+
def __getitem__(self, index):
|
| 741 |
+
return self.labels[index]
|
| 742 |
+
|
| 743 |
+
def __add__(self, other):
|
| 744 |
+
return self.concatenate(other)
|
| 745 |
+
|
| 746 |
+
def __sub__(self, other):
|
| 747 |
+
return self.relativize(other)
|
| 748 |
+
|
| 749 |
+
def split(self, depth: int) -> Tuple["Name", "Name"]:
|
| 750 |
+
"""Split a name into a prefix and suffix names at the specified depth.
|
| 751 |
+
|
| 752 |
+
*depth* is an ``int`` specifying the number of labels in the suffix
|
| 753 |
+
|
| 754 |
+
Raises ``ValueError`` if *depth* was not >= 0 and <= the length of the
|
| 755 |
+
name.
|
| 756 |
+
|
| 757 |
+
Returns the tuple ``(prefix, suffix)``.
|
| 758 |
+
"""
|
| 759 |
+
|
| 760 |
+
l = len(self.labels)
|
| 761 |
+
if depth == 0:
|
| 762 |
+
return (self, dns.name.empty)
|
| 763 |
+
elif depth == l:
|
| 764 |
+
return (dns.name.empty, self)
|
| 765 |
+
elif depth < 0 or depth > l:
|
| 766 |
+
raise ValueError("depth must be >= 0 and <= the length of the name")
|
| 767 |
+
return (Name(self[:-depth]), Name(self[-depth:]))
|
| 768 |
+
|
| 769 |
+
def concatenate(self, other: "Name") -> "Name":
|
| 770 |
+
"""Return a new name which is the concatenation of self and other.
|
| 771 |
+
|
| 772 |
+
Raises ``dns.name.AbsoluteConcatenation`` if the name is
|
| 773 |
+
absolute and *other* is not the empty name.
|
| 774 |
+
|
| 775 |
+
Returns a ``dns.name.Name``.
|
| 776 |
+
"""
|
| 777 |
+
|
| 778 |
+
if self.is_absolute() and len(other) > 0:
|
| 779 |
+
raise AbsoluteConcatenation
|
| 780 |
+
labels = list(self.labels)
|
| 781 |
+
labels.extend(list(other.labels))
|
| 782 |
+
return Name(labels)
|
| 783 |
+
|
| 784 |
+
def relativize(self, origin: "Name") -> "Name":
|
| 785 |
+
"""If the name is a subdomain of *origin*, return a new name which is
|
| 786 |
+
the name relative to origin. Otherwise return the name.
|
| 787 |
+
|
| 788 |
+
For example, relativizing ``www.dnspython.org.`` to origin
|
| 789 |
+
``dnspython.org.`` returns the name ``www``. Relativizing ``example.``
|
| 790 |
+
to origin ``dnspython.org.`` returns ``example.``.
|
| 791 |
+
|
| 792 |
+
Returns a ``dns.name.Name``.
|
| 793 |
+
"""
|
| 794 |
+
|
| 795 |
+
if origin is not None and self.is_subdomain(origin):
|
| 796 |
+
return Name(self[: -len(origin)])
|
| 797 |
+
else:
|
| 798 |
+
return self
|
| 799 |
+
|
| 800 |
+
def derelativize(self, origin: "Name") -> "Name":
|
| 801 |
+
"""If the name is a relative name, return a new name which is the
|
| 802 |
+
concatenation of the name and origin. Otherwise return the name.
|
| 803 |
+
|
| 804 |
+
For example, derelativizing ``www`` to origin ``dnspython.org.``
|
| 805 |
+
returns the name ``www.dnspython.org.``. Derelativizing ``example.``
|
| 806 |
+
to origin ``dnspython.org.`` returns ``example.``.
|
| 807 |
+
|
| 808 |
+
Returns a ``dns.name.Name``.
|
| 809 |
+
"""
|
| 810 |
+
|
| 811 |
+
if not self.is_absolute():
|
| 812 |
+
return self.concatenate(origin)
|
| 813 |
+
else:
|
| 814 |
+
return self
|
| 815 |
+
|
| 816 |
+
def choose_relativity(
|
| 817 |
+
self, origin: Optional["Name"] = None, relativize: bool = True
|
| 818 |
+
) -> "Name":
|
| 819 |
+
"""Return a name with the relativity desired by the caller.
|
| 820 |
+
|
| 821 |
+
If *origin* is ``None``, then the name is returned.
|
| 822 |
+
Otherwise, if *relativize* is ``True`` the name is
|
| 823 |
+
relativized, and if *relativize* is ``False`` the name is
|
| 824 |
+
derelativized.
|
| 825 |
+
|
| 826 |
+
Returns a ``dns.name.Name``.
|
| 827 |
+
"""
|
| 828 |
+
|
| 829 |
+
if origin:
|
| 830 |
+
if relativize:
|
| 831 |
+
return self.relativize(origin)
|
| 832 |
+
else:
|
| 833 |
+
return self.derelativize(origin)
|
| 834 |
+
else:
|
| 835 |
+
return self
|
| 836 |
+
|
| 837 |
+
def parent(self) -> "Name":
|
| 838 |
+
"""Return the parent of the name.
|
| 839 |
+
|
| 840 |
+
For example, the parent of ``www.dnspython.org.`` is ``dnspython.org``.
|
| 841 |
+
|
| 842 |
+
Raises ``dns.name.NoParent`` if the name is either the root name or the
|
| 843 |
+
empty name, and thus has no parent.
|
| 844 |
+
|
| 845 |
+
Returns a ``dns.name.Name``.
|
| 846 |
+
"""
|
| 847 |
+
|
| 848 |
+
if self == root or self == empty:
|
| 849 |
+
raise NoParent
|
| 850 |
+
return Name(self.labels[1:])
|
| 851 |
+
|
| 852 |
+
def predecessor(self, origin: "Name", prefix_ok: bool = True) -> "Name":
|
| 853 |
+
"""Return the maximal predecessor of *name* in the DNSSEC ordering in the zone
|
| 854 |
+
whose origin is *origin*, or return the longest name under *origin* if the
|
| 855 |
+
name is origin (i.e. wrap around to the longest name, which may still be
|
| 856 |
+
*origin* due to length considerations.
|
| 857 |
+
|
| 858 |
+
The relativity of the name is preserved, so if this name is relative
|
| 859 |
+
then the method will return a relative name, and likewise if this name
|
| 860 |
+
is absolute then the predecessor will be absolute.
|
| 861 |
+
|
| 862 |
+
*prefix_ok* indicates if prefixing labels is allowed, and
|
| 863 |
+
defaults to ``True``. Normally it is good to allow this, but if computing
|
| 864 |
+
a maximal predecessor at a zone cut point then ``False`` must be specified.
|
| 865 |
+
"""
|
| 866 |
+
return _handle_relativity_and_call(
|
| 867 |
+
_absolute_predecessor, self, origin, prefix_ok
|
| 868 |
+
)
|
| 869 |
+
|
| 870 |
+
def successor(self, origin: "Name", prefix_ok: bool = True) -> "Name":
|
| 871 |
+
"""Return the minimal successor of *name* in the DNSSEC ordering in the zone
|
| 872 |
+
whose origin is *origin*, or return *origin* if the successor cannot be
|
| 873 |
+
computed due to name length limitations.
|
| 874 |
+
|
| 875 |
+
Note that *origin* is returned in the "too long" cases because wrapping
|
| 876 |
+
around to the origin is how NSEC records express "end of the zone".
|
| 877 |
+
|
| 878 |
+
The relativity of the name is preserved, so if this name is relative
|
| 879 |
+
then the method will return a relative name, and likewise if this name
|
| 880 |
+
is absolute then the successor will be absolute.
|
| 881 |
+
|
| 882 |
+
*prefix_ok* indicates if prefixing a new minimal label is allowed, and
|
| 883 |
+
defaults to ``True``. Normally it is good to allow this, but if computing
|
| 884 |
+
a minimal successor at a zone cut point then ``False`` must be specified.
|
| 885 |
+
"""
|
| 886 |
+
return _handle_relativity_and_call(_absolute_successor, self, origin, prefix_ok)
|
| 887 |
+
|
| 888 |
+
|
| 889 |
+
#: The root name, '.'
|
| 890 |
+
root = Name([b""])
|
| 891 |
+
|
| 892 |
+
#: The empty name.
|
| 893 |
+
empty = Name([])
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
def from_unicode(
|
| 897 |
+
text: str, origin: Optional[Name] = root, idna_codec: Optional[IDNACodec] = None
|
| 898 |
+
) -> Name:
|
| 899 |
+
"""Convert unicode text into a Name object.
|
| 900 |
+
|
| 901 |
+
Labels are encoded in IDN ACE form according to rules specified by
|
| 902 |
+
the IDNA codec.
|
| 903 |
+
|
| 904 |
+
*text*, a ``str``, is the text to convert into a name.
|
| 905 |
+
|
| 906 |
+
*origin*, a ``dns.name.Name``, specifies the origin to
|
| 907 |
+
append to non-absolute names. The default is the root name.
|
| 908 |
+
|
| 909 |
+
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
|
| 910 |
+
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
|
| 911 |
+
is used.
|
| 912 |
+
|
| 913 |
+
Returns a ``dns.name.Name``.
|
| 914 |
+
"""
|
| 915 |
+
|
| 916 |
+
if not isinstance(text, str):
|
| 917 |
+
raise ValueError("input to from_unicode() must be a unicode string")
|
| 918 |
+
if not (origin is None or isinstance(origin, Name)):
|
| 919 |
+
raise ValueError("origin must be a Name or None")
|
| 920 |
+
labels = []
|
| 921 |
+
label = ""
|
| 922 |
+
escaping = False
|
| 923 |
+
edigits = 0
|
| 924 |
+
total = 0
|
| 925 |
+
if idna_codec is None:
|
| 926 |
+
idna_codec = IDNA_2003
|
| 927 |
+
if text == "@":
|
| 928 |
+
text = ""
|
| 929 |
+
if text:
|
| 930 |
+
if text in [".", "\u3002", "\uff0e", "\uff61"]:
|
| 931 |
+
return Name([b""]) # no Unicode "u" on this constant!
|
| 932 |
+
for c in text:
|
| 933 |
+
if escaping:
|
| 934 |
+
if edigits == 0:
|
| 935 |
+
if c.isdigit():
|
| 936 |
+
total = int(c)
|
| 937 |
+
edigits += 1
|
| 938 |
+
else:
|
| 939 |
+
label += c
|
| 940 |
+
escaping = False
|
| 941 |
+
else:
|
| 942 |
+
if not c.isdigit():
|
| 943 |
+
raise BadEscape
|
| 944 |
+
total *= 10
|
| 945 |
+
total += int(c)
|
| 946 |
+
edigits += 1
|
| 947 |
+
if edigits == 3:
|
| 948 |
+
escaping = False
|
| 949 |
+
label += chr(total)
|
| 950 |
+
elif c in [".", "\u3002", "\uff0e", "\uff61"]:
|
| 951 |
+
if len(label) == 0:
|
| 952 |
+
raise EmptyLabel
|
| 953 |
+
labels.append(idna_codec.encode(label))
|
| 954 |
+
label = ""
|
| 955 |
+
elif c == "\\":
|
| 956 |
+
escaping = True
|
| 957 |
+
edigits = 0
|
| 958 |
+
total = 0
|
| 959 |
+
else:
|
| 960 |
+
label += c
|
| 961 |
+
if escaping:
|
| 962 |
+
raise BadEscape
|
| 963 |
+
if len(label) > 0:
|
| 964 |
+
labels.append(idna_codec.encode(label))
|
| 965 |
+
else:
|
| 966 |
+
labels.append(b"")
|
| 967 |
+
|
| 968 |
+
if (len(labels) == 0 or labels[-1] != b"") and origin is not None:
|
| 969 |
+
labels.extend(list(origin.labels))
|
| 970 |
+
return Name(labels)
|
| 971 |
+
|
| 972 |
+
|
| 973 |
+
def is_all_ascii(text: str) -> bool:
|
| 974 |
+
for c in text:
|
| 975 |
+
if ord(c) > 0x7F:
|
| 976 |
+
return False
|
| 977 |
+
return True
|
| 978 |
+
|
| 979 |
+
|
| 980 |
+
def from_text(
|
| 981 |
+
text: Union[bytes, str],
|
| 982 |
+
origin: Optional[Name] = root,
|
| 983 |
+
idna_codec: Optional[IDNACodec] = None,
|
| 984 |
+
) -> Name:
|
| 985 |
+
"""Convert text into a Name object.
|
| 986 |
+
|
| 987 |
+
*text*, a ``bytes`` or ``str``, is the text to convert into a name.
|
| 988 |
+
|
| 989 |
+
*origin*, a ``dns.name.Name``, specifies the origin to
|
| 990 |
+
append to non-absolute names. The default is the root name.
|
| 991 |
+
|
| 992 |
+
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
|
| 993 |
+
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
|
| 994 |
+
is used.
|
| 995 |
+
|
| 996 |
+
Returns a ``dns.name.Name``.
|
| 997 |
+
"""
|
| 998 |
+
|
| 999 |
+
if isinstance(text, str):
|
| 1000 |
+
if not is_all_ascii(text):
|
| 1001 |
+
# Some codepoint in the input text is > 127, so IDNA applies.
|
| 1002 |
+
return from_unicode(text, origin, idna_codec)
|
| 1003 |
+
# The input is all ASCII, so treat this like an ordinary non-IDNA
|
| 1004 |
+
# domain name. Note that "all ASCII" is about the input text,
|
| 1005 |
+
# not the codepoints in the domain name. E.g. if text has value
|
| 1006 |
+
#
|
| 1007 |
+
# r'\150\151\152\153\154\155\156\157\158\159'
|
| 1008 |
+
#
|
| 1009 |
+
# then it's still "all ASCII" even though the domain name has
|
| 1010 |
+
# codepoints > 127.
|
| 1011 |
+
text = text.encode("ascii")
|
| 1012 |
+
if not isinstance(text, bytes):
|
| 1013 |
+
raise ValueError("input to from_text() must be a string")
|
| 1014 |
+
if not (origin is None or isinstance(origin, Name)):
|
| 1015 |
+
raise ValueError("origin must be a Name or None")
|
| 1016 |
+
labels = []
|
| 1017 |
+
label = b""
|
| 1018 |
+
escaping = False
|
| 1019 |
+
edigits = 0
|
| 1020 |
+
total = 0
|
| 1021 |
+
if text == b"@":
|
| 1022 |
+
text = b""
|
| 1023 |
+
if text:
|
| 1024 |
+
if text == b".":
|
| 1025 |
+
return Name([b""])
|
| 1026 |
+
for c in text:
|
| 1027 |
+
byte_ = struct.pack("!B", c)
|
| 1028 |
+
if escaping:
|
| 1029 |
+
if edigits == 0:
|
| 1030 |
+
if byte_.isdigit():
|
| 1031 |
+
total = int(byte_)
|
| 1032 |
+
edigits += 1
|
| 1033 |
+
else:
|
| 1034 |
+
label += byte_
|
| 1035 |
+
escaping = False
|
| 1036 |
+
else:
|
| 1037 |
+
if not byte_.isdigit():
|
| 1038 |
+
raise BadEscape
|
| 1039 |
+
total *= 10
|
| 1040 |
+
total += int(byte_)
|
| 1041 |
+
edigits += 1
|
| 1042 |
+
if edigits == 3:
|
| 1043 |
+
escaping = False
|
| 1044 |
+
label += struct.pack("!B", total)
|
| 1045 |
+
elif byte_ == b".":
|
| 1046 |
+
if len(label) == 0:
|
| 1047 |
+
raise EmptyLabel
|
| 1048 |
+
labels.append(label)
|
| 1049 |
+
label = b""
|
| 1050 |
+
elif byte_ == b"\\":
|
| 1051 |
+
escaping = True
|
| 1052 |
+
edigits = 0
|
| 1053 |
+
total = 0
|
| 1054 |
+
else:
|
| 1055 |
+
label += byte_
|
| 1056 |
+
if escaping:
|
| 1057 |
+
raise BadEscape
|
| 1058 |
+
if len(label) > 0:
|
| 1059 |
+
labels.append(label)
|
| 1060 |
+
else:
|
| 1061 |
+
labels.append(b"")
|
| 1062 |
+
if (len(labels) == 0 or labels[-1] != b"") and origin is not None:
|
| 1063 |
+
labels.extend(list(origin.labels))
|
| 1064 |
+
return Name(labels)
|
| 1065 |
+
|
| 1066 |
+
|
| 1067 |
+
# we need 'dns.wire.Parser' quoted as dns.name and dns.wire depend on each other.
|
| 1068 |
+
|
| 1069 |
+
|
| 1070 |
+
def from_wire_parser(parser: "dns.wire.Parser") -> Name:
|
| 1071 |
+
"""Convert possibly compressed wire format into a Name.
|
| 1072 |
+
|
| 1073 |
+
*parser* is a dns.wire.Parser.
|
| 1074 |
+
|
| 1075 |
+
Raises ``dns.name.BadPointer`` if a compression pointer did not
|
| 1076 |
+
point backwards in the message.
|
| 1077 |
+
|
| 1078 |
+
Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
|
| 1079 |
+
|
| 1080 |
+
Returns a ``dns.name.Name``
|
| 1081 |
+
"""
|
| 1082 |
+
|
| 1083 |
+
labels = []
|
| 1084 |
+
biggest_pointer = parser.current
|
| 1085 |
+
with parser.restore_furthest():
|
| 1086 |
+
count = parser.get_uint8()
|
| 1087 |
+
while count != 0:
|
| 1088 |
+
if count < 64:
|
| 1089 |
+
labels.append(parser.get_bytes(count))
|
| 1090 |
+
elif count >= 192:
|
| 1091 |
+
current = (count & 0x3F) * 256 + parser.get_uint8()
|
| 1092 |
+
if current >= biggest_pointer:
|
| 1093 |
+
raise BadPointer
|
| 1094 |
+
biggest_pointer = current
|
| 1095 |
+
parser.seek(current)
|
| 1096 |
+
else:
|
| 1097 |
+
raise BadLabelType
|
| 1098 |
+
count = parser.get_uint8()
|
| 1099 |
+
labels.append(b"")
|
| 1100 |
+
return Name(labels)
|
| 1101 |
+
|
| 1102 |
+
|
| 1103 |
+
def from_wire(message: bytes, current: int) -> Tuple[Name, int]:
|
| 1104 |
+
"""Convert possibly compressed wire format into a Name.
|
| 1105 |
+
|
| 1106 |
+
*message* is a ``bytes`` containing an entire DNS message in DNS
|
| 1107 |
+
wire form.
|
| 1108 |
+
|
| 1109 |
+
*current*, an ``int``, is the offset of the beginning of the name
|
| 1110 |
+
from the start of the message
|
| 1111 |
+
|
| 1112 |
+
Raises ``dns.name.BadPointer`` if a compression pointer did not
|
| 1113 |
+
point backwards in the message.
|
| 1114 |
+
|
| 1115 |
+
Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
|
| 1116 |
+
|
| 1117 |
+
Returns a ``(dns.name.Name, int)`` tuple consisting of the name
|
| 1118 |
+
that was read and the number of bytes of the wire format message
|
| 1119 |
+
which were consumed reading it.
|
| 1120 |
+
"""
|
| 1121 |
+
|
| 1122 |
+
if not isinstance(message, bytes):
|
| 1123 |
+
raise ValueError("input to from_wire() must be a byte string")
|
| 1124 |
+
parser = dns.wire.Parser(message, current)
|
| 1125 |
+
name = from_wire_parser(parser)
|
| 1126 |
+
return (name, parser.current - current)
|
| 1127 |
+
|
| 1128 |
+
|
| 1129 |
+
# RFC 4471 Support
|
| 1130 |
+
|
| 1131 |
+
_MINIMAL_OCTET = b"\x00"
|
| 1132 |
+
_MINIMAL_OCTET_VALUE = ord(_MINIMAL_OCTET)
|
| 1133 |
+
_SUCCESSOR_PREFIX = Name([_MINIMAL_OCTET])
|
| 1134 |
+
_MAXIMAL_OCTET = b"\xff"
|
| 1135 |
+
_MAXIMAL_OCTET_VALUE = ord(_MAXIMAL_OCTET)
|
| 1136 |
+
_AT_SIGN_VALUE = ord("@")
|
| 1137 |
+
_LEFT_SQUARE_BRACKET_VALUE = ord("[")
|
| 1138 |
+
|
| 1139 |
+
|
| 1140 |
+
def _wire_length(labels):
|
| 1141 |
+
return functools.reduce(lambda v, x: v + len(x) + 1, labels, 0)
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
def _pad_to_max_name(name):
|
| 1145 |
+
needed = 255 - _wire_length(name.labels)
|
| 1146 |
+
new_labels = []
|
| 1147 |
+
while needed > 64:
|
| 1148 |
+
new_labels.append(_MAXIMAL_OCTET * 63)
|
| 1149 |
+
needed -= 64
|
| 1150 |
+
if needed >= 2:
|
| 1151 |
+
new_labels.append(_MAXIMAL_OCTET * (needed - 1))
|
| 1152 |
+
# Note we're already maximal in the needed == 1 case as while we'd like
|
| 1153 |
+
# to add one more byte as a new label, we can't, as adding a new non-empty
|
| 1154 |
+
# label requires at least 2 bytes.
|
| 1155 |
+
new_labels = list(reversed(new_labels))
|
| 1156 |
+
new_labels.extend(name.labels)
|
| 1157 |
+
return Name(new_labels)
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
def _pad_to_max_label(label, suffix_labels):
|
| 1161 |
+
length = len(label)
|
| 1162 |
+
# We have to subtract one here to account for the length byte of label.
|
| 1163 |
+
remaining = 255 - _wire_length(suffix_labels) - length - 1
|
| 1164 |
+
if remaining <= 0:
|
| 1165 |
+
# Shouldn't happen!
|
| 1166 |
+
return label
|
| 1167 |
+
needed = min(63 - length, remaining)
|
| 1168 |
+
return label + _MAXIMAL_OCTET * needed
|
| 1169 |
+
|
| 1170 |
+
|
| 1171 |
+
def _absolute_predecessor(name: Name, origin: Name, prefix_ok: bool) -> Name:
|
| 1172 |
+
# This is the RFC 4471 predecessor algorithm using the "absolute method" of section
|
| 1173 |
+
# 3.1.1.
|
| 1174 |
+
#
|
| 1175 |
+
# Our caller must ensure that the name and origin are absolute, and that name is a
|
| 1176 |
+
# subdomain of origin.
|
| 1177 |
+
if name == origin:
|
| 1178 |
+
return _pad_to_max_name(name)
|
| 1179 |
+
least_significant_label = name[0]
|
| 1180 |
+
if least_significant_label == _MINIMAL_OCTET:
|
| 1181 |
+
return name.parent()
|
| 1182 |
+
least_octet = least_significant_label[-1]
|
| 1183 |
+
suffix_labels = name.labels[1:]
|
| 1184 |
+
if least_octet == _MINIMAL_OCTET_VALUE:
|
| 1185 |
+
new_labels = [least_significant_label[:-1]]
|
| 1186 |
+
else:
|
| 1187 |
+
octets = bytearray(least_significant_label)
|
| 1188 |
+
octet = octets[-1]
|
| 1189 |
+
if octet == _LEFT_SQUARE_BRACKET_VALUE:
|
| 1190 |
+
octet = _AT_SIGN_VALUE
|
| 1191 |
+
else:
|
| 1192 |
+
octet -= 1
|
| 1193 |
+
octets[-1] = octet
|
| 1194 |
+
least_significant_label = bytes(octets)
|
| 1195 |
+
new_labels = [_pad_to_max_label(least_significant_label, suffix_labels)]
|
| 1196 |
+
new_labels.extend(suffix_labels)
|
| 1197 |
+
name = Name(new_labels)
|
| 1198 |
+
if prefix_ok:
|
| 1199 |
+
return _pad_to_max_name(name)
|
| 1200 |
+
else:
|
| 1201 |
+
return name
|
| 1202 |
+
|
| 1203 |
+
|
| 1204 |
+
def _absolute_successor(name: Name, origin: Name, prefix_ok: bool) -> Name:
|
| 1205 |
+
# This is the RFC 4471 successor algorithm using the "absolute method" of section
|
| 1206 |
+
# 3.1.2.
|
| 1207 |
+
#
|
| 1208 |
+
# Our caller must ensure that the name and origin are absolute, and that name is a
|
| 1209 |
+
# subdomain of origin.
|
| 1210 |
+
if prefix_ok:
|
| 1211 |
+
# Try prefixing \000 as new label
|
| 1212 |
+
try:
|
| 1213 |
+
return _SUCCESSOR_PREFIX.concatenate(name)
|
| 1214 |
+
except NameTooLong:
|
| 1215 |
+
pass
|
| 1216 |
+
while name != origin:
|
| 1217 |
+
# Try extending the least significant label.
|
| 1218 |
+
least_significant_label = name[0]
|
| 1219 |
+
if len(least_significant_label) < 63:
|
| 1220 |
+
# We may be able to extend the least label with a minimal additional byte.
|
| 1221 |
+
# This is only "may" because we could have a maximal length name even though
|
| 1222 |
+
# the least significant label isn't maximally long.
|
| 1223 |
+
new_labels = [least_significant_label + _MINIMAL_OCTET]
|
| 1224 |
+
new_labels.extend(name.labels[1:])
|
| 1225 |
+
try:
|
| 1226 |
+
return dns.name.Name(new_labels)
|
| 1227 |
+
except dns.name.NameTooLong:
|
| 1228 |
+
pass
|
| 1229 |
+
# We can't extend the label either, so we'll try to increment the least
|
| 1230 |
+
# signficant non-maximal byte in it.
|
| 1231 |
+
octets = bytearray(least_significant_label)
|
| 1232 |
+
# We do this reversed iteration with an explicit indexing variable because
|
| 1233 |
+
# if we find something to increment, we're going to want to truncate everything
|
| 1234 |
+
# to the right of it.
|
| 1235 |
+
for i in range(len(octets) - 1, -1, -1):
|
| 1236 |
+
octet = octets[i]
|
| 1237 |
+
if octet == _MAXIMAL_OCTET_VALUE:
|
| 1238 |
+
# We can't increment this, so keep looking.
|
| 1239 |
+
continue
|
| 1240 |
+
# Finally, something we can increment. We have to apply a special rule for
|
| 1241 |
+
# incrementing "@", sending it to "[", because RFC 4034 6.1 says that when
|
| 1242 |
+
# comparing names, uppercase letters compare as if they were their
|
| 1243 |
+
# lower-case equivalents. If we increment "@" to "A", then it would compare
|
| 1244 |
+
# as "a", which is after "[", "\", "]", "^", "_", and "`", so we would have
|
| 1245 |
+
# skipped the most minimal successor, namely "[".
|
| 1246 |
+
if octet == _AT_SIGN_VALUE:
|
| 1247 |
+
octet = _LEFT_SQUARE_BRACKET_VALUE
|
| 1248 |
+
else:
|
| 1249 |
+
octet += 1
|
| 1250 |
+
octets[i] = octet
|
| 1251 |
+
# We can now truncate all of the maximal values we skipped (if any)
|
| 1252 |
+
new_labels = [bytes(octets[: i + 1])]
|
| 1253 |
+
new_labels.extend(name.labels[1:])
|
| 1254 |
+
# We haven't changed the length of the name, so the Name constructor will
|
| 1255 |
+
# always work.
|
| 1256 |
+
return Name(new_labels)
|
| 1257 |
+
# We couldn't increment, so chop off the least significant label and try
|
| 1258 |
+
# again.
|
| 1259 |
+
name = name.parent()
|
| 1260 |
+
|
| 1261 |
+
# We couldn't increment at all, so return the origin, as wrapping around is the
|
| 1262 |
+
# DNSSEC way.
|
| 1263 |
+
return origin
|
| 1264 |
+
|
| 1265 |
+
|
| 1266 |
+
def _handle_relativity_and_call(
|
| 1267 |
+
function: Callable[[Name, Name, bool], Name],
|
| 1268 |
+
name: Name,
|
| 1269 |
+
origin: Name,
|
| 1270 |
+
prefix_ok: bool,
|
| 1271 |
+
) -> Name:
|
| 1272 |
+
# Make "name" absolute if needed, ensure that the origin is absolute,
|
| 1273 |
+
# call function(), and then relativize the result if needed.
|
| 1274 |
+
if not origin.is_absolute():
|
| 1275 |
+
raise NeedAbsoluteNameOrOrigin
|
| 1276 |
+
relative = not name.is_absolute()
|
| 1277 |
+
if relative:
|
| 1278 |
+
name = name.derelativize(origin)
|
| 1279 |
+
elif not name.is_subdomain(origin):
|
| 1280 |
+
raise NeedSubdomainOfOrigin
|
| 1281 |
+
result_name = function(name, origin, prefix_ok)
|
| 1282 |
+
if relative:
|
| 1283 |
+
result_name = result_name.relativize(origin)
|
| 1284 |
+
return result_name
|
vllm/lib/python3.10/site-packages/dns/tokenizer.py
ADDED
|
@@ -0,0 +1,708 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2003-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
"""Tokenize DNS zone file format"""
|
| 19 |
+
|
| 20 |
+
import io
|
| 21 |
+
import sys
|
| 22 |
+
from typing import Any, List, Optional, Tuple
|
| 23 |
+
|
| 24 |
+
import dns.exception
|
| 25 |
+
import dns.name
|
| 26 |
+
import dns.ttl
|
| 27 |
+
|
| 28 |
+
_DELIMITERS = {" ", "\t", "\n", ";", "(", ")", '"'}
|
| 29 |
+
_QUOTING_DELIMITERS = {'"'}
|
| 30 |
+
|
| 31 |
+
EOF = 0
|
| 32 |
+
EOL = 1
|
| 33 |
+
WHITESPACE = 2
|
| 34 |
+
IDENTIFIER = 3
|
| 35 |
+
QUOTED_STRING = 4
|
| 36 |
+
COMMENT = 5
|
| 37 |
+
DELIMITER = 6
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class UngetBufferFull(dns.exception.DNSException):
|
| 41 |
+
"""An attempt was made to unget a token when the unget buffer was full."""
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Token:
|
| 45 |
+
"""A DNS zone file format token.
|
| 46 |
+
|
| 47 |
+
ttype: The token type
|
| 48 |
+
value: The token value
|
| 49 |
+
has_escape: Does the token value contain escapes?
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
ttype: int,
|
| 55 |
+
value: Any = "",
|
| 56 |
+
has_escape: bool = False,
|
| 57 |
+
comment: Optional[str] = None,
|
| 58 |
+
):
|
| 59 |
+
"""Initialize a token instance."""
|
| 60 |
+
|
| 61 |
+
self.ttype = ttype
|
| 62 |
+
self.value = value
|
| 63 |
+
self.has_escape = has_escape
|
| 64 |
+
self.comment = comment
|
| 65 |
+
|
| 66 |
+
def is_eof(self) -> bool:
|
| 67 |
+
return self.ttype == EOF
|
| 68 |
+
|
| 69 |
+
def is_eol(self) -> bool:
|
| 70 |
+
return self.ttype == EOL
|
| 71 |
+
|
| 72 |
+
def is_whitespace(self) -> bool:
|
| 73 |
+
return self.ttype == WHITESPACE
|
| 74 |
+
|
| 75 |
+
def is_identifier(self) -> bool:
|
| 76 |
+
return self.ttype == IDENTIFIER
|
| 77 |
+
|
| 78 |
+
def is_quoted_string(self) -> bool:
|
| 79 |
+
return self.ttype == QUOTED_STRING
|
| 80 |
+
|
| 81 |
+
def is_comment(self) -> bool:
|
| 82 |
+
return self.ttype == COMMENT
|
| 83 |
+
|
| 84 |
+
def is_delimiter(self) -> bool: # pragma: no cover (we don't return delimiters yet)
|
| 85 |
+
return self.ttype == DELIMITER
|
| 86 |
+
|
| 87 |
+
def is_eol_or_eof(self) -> bool:
|
| 88 |
+
return self.ttype == EOL or self.ttype == EOF
|
| 89 |
+
|
| 90 |
+
def __eq__(self, other):
|
| 91 |
+
if not isinstance(other, Token):
|
| 92 |
+
return False
|
| 93 |
+
return self.ttype == other.ttype and self.value == other.value
|
| 94 |
+
|
| 95 |
+
def __ne__(self, other):
|
| 96 |
+
if not isinstance(other, Token):
|
| 97 |
+
return True
|
| 98 |
+
return self.ttype != other.ttype or self.value != other.value
|
| 99 |
+
|
| 100 |
+
def __str__(self):
|
| 101 |
+
return '%d "%s"' % (self.ttype, self.value)
|
| 102 |
+
|
| 103 |
+
def unescape(self) -> "Token":
|
| 104 |
+
if not self.has_escape:
|
| 105 |
+
return self
|
| 106 |
+
unescaped = ""
|
| 107 |
+
l = len(self.value)
|
| 108 |
+
i = 0
|
| 109 |
+
while i < l:
|
| 110 |
+
c = self.value[i]
|
| 111 |
+
i += 1
|
| 112 |
+
if c == "\\":
|
| 113 |
+
if i >= l: # pragma: no cover (can't happen via get())
|
| 114 |
+
raise dns.exception.UnexpectedEnd
|
| 115 |
+
c = self.value[i]
|
| 116 |
+
i += 1
|
| 117 |
+
if c.isdigit():
|
| 118 |
+
if i >= l:
|
| 119 |
+
raise dns.exception.UnexpectedEnd
|
| 120 |
+
c2 = self.value[i]
|
| 121 |
+
i += 1
|
| 122 |
+
if i >= l:
|
| 123 |
+
raise dns.exception.UnexpectedEnd
|
| 124 |
+
c3 = self.value[i]
|
| 125 |
+
i += 1
|
| 126 |
+
if not (c2.isdigit() and c3.isdigit()):
|
| 127 |
+
raise dns.exception.SyntaxError
|
| 128 |
+
codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
|
| 129 |
+
if codepoint > 255:
|
| 130 |
+
raise dns.exception.SyntaxError
|
| 131 |
+
c = chr(codepoint)
|
| 132 |
+
unescaped += c
|
| 133 |
+
return Token(self.ttype, unescaped)
|
| 134 |
+
|
| 135 |
+
def unescape_to_bytes(self) -> "Token":
|
| 136 |
+
# We used to use unescape() for TXT-like records, but this
|
| 137 |
+
# caused problems as we'd process DNS escapes into Unicode code
|
| 138 |
+
# points instead of byte values, and then a to_text() of the
|
| 139 |
+
# processed data would not equal the original input. For
|
| 140 |
+
# example, \226 in the TXT record would have a to_text() of
|
| 141 |
+
# \195\162 because we applied UTF-8 encoding to Unicode code
|
| 142 |
+
# point 226.
|
| 143 |
+
#
|
| 144 |
+
# We now apply escapes while converting directly to bytes,
|
| 145 |
+
# avoiding this double encoding.
|
| 146 |
+
#
|
| 147 |
+
# This code also handles cases where the unicode input has
|
| 148 |
+
# non-ASCII code-points in it by converting it to UTF-8. TXT
|
| 149 |
+
# records aren't defined for Unicode, but this is the best we
|
| 150 |
+
# can do to preserve meaning. For example,
|
| 151 |
+
#
|
| 152 |
+
# foo\u200bbar
|
| 153 |
+
#
|
| 154 |
+
# (where \u200b is Unicode code point 0x200b) will be treated
|
| 155 |
+
# as if the input had been the UTF-8 encoding of that string,
|
| 156 |
+
# namely:
|
| 157 |
+
#
|
| 158 |
+
# foo\226\128\139bar
|
| 159 |
+
#
|
| 160 |
+
unescaped = b""
|
| 161 |
+
l = len(self.value)
|
| 162 |
+
i = 0
|
| 163 |
+
while i < l:
|
| 164 |
+
c = self.value[i]
|
| 165 |
+
i += 1
|
| 166 |
+
if c == "\\":
|
| 167 |
+
if i >= l: # pragma: no cover (can't happen via get())
|
| 168 |
+
raise dns.exception.UnexpectedEnd
|
| 169 |
+
c = self.value[i]
|
| 170 |
+
i += 1
|
| 171 |
+
if c.isdigit():
|
| 172 |
+
if i >= l:
|
| 173 |
+
raise dns.exception.UnexpectedEnd
|
| 174 |
+
c2 = self.value[i]
|
| 175 |
+
i += 1
|
| 176 |
+
if i >= l:
|
| 177 |
+
raise dns.exception.UnexpectedEnd
|
| 178 |
+
c3 = self.value[i]
|
| 179 |
+
i += 1
|
| 180 |
+
if not (c2.isdigit() and c3.isdigit()):
|
| 181 |
+
raise dns.exception.SyntaxError
|
| 182 |
+
codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
|
| 183 |
+
if codepoint > 255:
|
| 184 |
+
raise dns.exception.SyntaxError
|
| 185 |
+
unescaped += b"%c" % (codepoint)
|
| 186 |
+
else:
|
| 187 |
+
# Note that as mentioned above, if c is a Unicode
|
| 188 |
+
# code point outside of the ASCII range, then this
|
| 189 |
+
# += is converting that code point to its UTF-8
|
| 190 |
+
# encoding and appending multiple bytes to
|
| 191 |
+
# unescaped.
|
| 192 |
+
unescaped += c.encode()
|
| 193 |
+
else:
|
| 194 |
+
unescaped += c.encode()
|
| 195 |
+
return Token(self.ttype, bytes(unescaped))
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class Tokenizer:
|
| 199 |
+
"""A DNS zone file format tokenizer.
|
| 200 |
+
|
| 201 |
+
A token object is basically a (type, value) tuple. The valid
|
| 202 |
+
types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
|
| 203 |
+
COMMENT, and DELIMITER.
|
| 204 |
+
|
| 205 |
+
file: The file to tokenize
|
| 206 |
+
|
| 207 |
+
ungotten_char: The most recently ungotten character, or None.
|
| 208 |
+
|
| 209 |
+
ungotten_token: The most recently ungotten token, or None.
|
| 210 |
+
|
| 211 |
+
multiline: The current multiline level. This value is increased
|
| 212 |
+
by one every time a '(' delimiter is read, and decreased by one every time
|
| 213 |
+
a ')' delimiter is read.
|
| 214 |
+
|
| 215 |
+
quoting: This variable is true if the tokenizer is currently
|
| 216 |
+
reading a quoted string.
|
| 217 |
+
|
| 218 |
+
eof: This variable is true if the tokenizer has encountered EOF.
|
| 219 |
+
|
| 220 |
+
delimiters: The current delimiter dictionary.
|
| 221 |
+
|
| 222 |
+
line_number: The current line number
|
| 223 |
+
|
| 224 |
+
filename: A filename that will be returned by the where() method.
|
| 225 |
+
|
| 226 |
+
idna_codec: A dns.name.IDNACodec, specifies the IDNA
|
| 227 |
+
encoder/decoder. If None, the default IDNA 2003
|
| 228 |
+
encoder/decoder is used.
|
| 229 |
+
"""
|
| 230 |
+
|
| 231 |
+
def __init__(
|
| 232 |
+
self,
|
| 233 |
+
f: Any = sys.stdin,
|
| 234 |
+
filename: Optional[str] = None,
|
| 235 |
+
idna_codec: Optional[dns.name.IDNACodec] = None,
|
| 236 |
+
):
|
| 237 |
+
"""Initialize a tokenizer instance.
|
| 238 |
+
|
| 239 |
+
f: The file to tokenize. The default is sys.stdin.
|
| 240 |
+
This parameter may also be a string, in which case the tokenizer
|
| 241 |
+
will take its input from the contents of the string.
|
| 242 |
+
|
| 243 |
+
filename: the name of the filename that the where() method
|
| 244 |
+
will return.
|
| 245 |
+
|
| 246 |
+
idna_codec: A dns.name.IDNACodec, specifies the IDNA
|
| 247 |
+
encoder/decoder. If None, the default IDNA 2003
|
| 248 |
+
encoder/decoder is used.
|
| 249 |
+
"""
|
| 250 |
+
|
| 251 |
+
if isinstance(f, str):
|
| 252 |
+
f = io.StringIO(f)
|
| 253 |
+
if filename is None:
|
| 254 |
+
filename = "<string>"
|
| 255 |
+
elif isinstance(f, bytes):
|
| 256 |
+
f = io.StringIO(f.decode())
|
| 257 |
+
if filename is None:
|
| 258 |
+
filename = "<string>"
|
| 259 |
+
else:
|
| 260 |
+
if filename is None:
|
| 261 |
+
if f is sys.stdin:
|
| 262 |
+
filename = "<stdin>"
|
| 263 |
+
else:
|
| 264 |
+
filename = "<file>"
|
| 265 |
+
self.file = f
|
| 266 |
+
self.ungotten_char: Optional[str] = None
|
| 267 |
+
self.ungotten_token: Optional[Token] = None
|
| 268 |
+
self.multiline = 0
|
| 269 |
+
self.quoting = False
|
| 270 |
+
self.eof = False
|
| 271 |
+
self.delimiters = _DELIMITERS
|
| 272 |
+
self.line_number = 1
|
| 273 |
+
assert filename is not None
|
| 274 |
+
self.filename = filename
|
| 275 |
+
if idna_codec is None:
|
| 276 |
+
self.idna_codec: dns.name.IDNACodec = dns.name.IDNA_2003
|
| 277 |
+
else:
|
| 278 |
+
self.idna_codec = idna_codec
|
| 279 |
+
|
| 280 |
+
def _get_char(self) -> str:
|
| 281 |
+
"""Read a character from input."""
|
| 282 |
+
|
| 283 |
+
if self.ungotten_char is None:
|
| 284 |
+
if self.eof:
|
| 285 |
+
c = ""
|
| 286 |
+
else:
|
| 287 |
+
c = self.file.read(1)
|
| 288 |
+
if c == "":
|
| 289 |
+
self.eof = True
|
| 290 |
+
elif c == "\n":
|
| 291 |
+
self.line_number += 1
|
| 292 |
+
else:
|
| 293 |
+
c = self.ungotten_char
|
| 294 |
+
self.ungotten_char = None
|
| 295 |
+
return c
|
| 296 |
+
|
| 297 |
+
def where(self) -> Tuple[str, int]:
|
| 298 |
+
"""Return the current location in the input.
|
| 299 |
+
|
| 300 |
+
Returns a (string, int) tuple. The first item is the filename of
|
| 301 |
+
the input, the second is the current line number.
|
| 302 |
+
"""
|
| 303 |
+
|
| 304 |
+
return (self.filename, self.line_number)
|
| 305 |
+
|
| 306 |
+
def _unget_char(self, c: str) -> None:
|
| 307 |
+
"""Unget a character.
|
| 308 |
+
|
| 309 |
+
The unget buffer for characters is only one character large; it is
|
| 310 |
+
an error to try to unget a character when the unget buffer is not
|
| 311 |
+
empty.
|
| 312 |
+
|
| 313 |
+
c: the character to unget
|
| 314 |
+
raises UngetBufferFull: there is already an ungotten char
|
| 315 |
+
"""
|
| 316 |
+
|
| 317 |
+
if self.ungotten_char is not None:
|
| 318 |
+
# this should never happen!
|
| 319 |
+
raise UngetBufferFull # pragma: no cover
|
| 320 |
+
self.ungotten_char = c
|
| 321 |
+
|
| 322 |
+
def skip_whitespace(self) -> int:
|
| 323 |
+
"""Consume input until a non-whitespace character is encountered.
|
| 324 |
+
|
| 325 |
+
The non-whitespace character is then ungotten, and the number of
|
| 326 |
+
whitespace characters consumed is returned.
|
| 327 |
+
|
| 328 |
+
If the tokenizer is in multiline mode, then newlines are whitespace.
|
| 329 |
+
|
| 330 |
+
Returns the number of characters skipped.
|
| 331 |
+
"""
|
| 332 |
+
|
| 333 |
+
skipped = 0
|
| 334 |
+
while True:
|
| 335 |
+
c = self._get_char()
|
| 336 |
+
if c != " " and c != "\t":
|
| 337 |
+
if (c != "\n") or not self.multiline:
|
| 338 |
+
self._unget_char(c)
|
| 339 |
+
return skipped
|
| 340 |
+
skipped += 1
|
| 341 |
+
|
| 342 |
+
def get(self, want_leading: bool = False, want_comment: bool = False) -> Token:
|
| 343 |
+
"""Get the next token.
|
| 344 |
+
|
| 345 |
+
want_leading: If True, return a WHITESPACE token if the
|
| 346 |
+
first character read is whitespace. The default is False.
|
| 347 |
+
|
| 348 |
+
want_comment: If True, return a COMMENT token if the
|
| 349 |
+
first token read is a comment. The default is False.
|
| 350 |
+
|
| 351 |
+
Raises dns.exception.UnexpectedEnd: input ended prematurely
|
| 352 |
+
|
| 353 |
+
Raises dns.exception.SyntaxError: input was badly formed
|
| 354 |
+
|
| 355 |
+
Returns a Token.
|
| 356 |
+
"""
|
| 357 |
+
|
| 358 |
+
if self.ungotten_token is not None:
|
| 359 |
+
utoken = self.ungotten_token
|
| 360 |
+
self.ungotten_token = None
|
| 361 |
+
if utoken.is_whitespace():
|
| 362 |
+
if want_leading:
|
| 363 |
+
return utoken
|
| 364 |
+
elif utoken.is_comment():
|
| 365 |
+
if want_comment:
|
| 366 |
+
return utoken
|
| 367 |
+
else:
|
| 368 |
+
return utoken
|
| 369 |
+
skipped = self.skip_whitespace()
|
| 370 |
+
if want_leading and skipped > 0:
|
| 371 |
+
return Token(WHITESPACE, " ")
|
| 372 |
+
token = ""
|
| 373 |
+
ttype = IDENTIFIER
|
| 374 |
+
has_escape = False
|
| 375 |
+
while True:
|
| 376 |
+
c = self._get_char()
|
| 377 |
+
if c == "" or c in self.delimiters:
|
| 378 |
+
if c == "" and self.quoting:
|
| 379 |
+
raise dns.exception.UnexpectedEnd
|
| 380 |
+
if token == "" and ttype != QUOTED_STRING:
|
| 381 |
+
if c == "(":
|
| 382 |
+
self.multiline += 1
|
| 383 |
+
self.skip_whitespace()
|
| 384 |
+
continue
|
| 385 |
+
elif c == ")":
|
| 386 |
+
if self.multiline <= 0:
|
| 387 |
+
raise dns.exception.SyntaxError
|
| 388 |
+
self.multiline -= 1
|
| 389 |
+
self.skip_whitespace()
|
| 390 |
+
continue
|
| 391 |
+
elif c == '"':
|
| 392 |
+
if not self.quoting:
|
| 393 |
+
self.quoting = True
|
| 394 |
+
self.delimiters = _QUOTING_DELIMITERS
|
| 395 |
+
ttype = QUOTED_STRING
|
| 396 |
+
continue
|
| 397 |
+
else:
|
| 398 |
+
self.quoting = False
|
| 399 |
+
self.delimiters = _DELIMITERS
|
| 400 |
+
self.skip_whitespace()
|
| 401 |
+
continue
|
| 402 |
+
elif c == "\n":
|
| 403 |
+
return Token(EOL, "\n")
|
| 404 |
+
elif c == ";":
|
| 405 |
+
while 1:
|
| 406 |
+
c = self._get_char()
|
| 407 |
+
if c == "\n" or c == "":
|
| 408 |
+
break
|
| 409 |
+
token += c
|
| 410 |
+
if want_comment:
|
| 411 |
+
self._unget_char(c)
|
| 412 |
+
return Token(COMMENT, token)
|
| 413 |
+
elif c == "":
|
| 414 |
+
if self.multiline:
|
| 415 |
+
raise dns.exception.SyntaxError(
|
| 416 |
+
"unbalanced parentheses"
|
| 417 |
+
)
|
| 418 |
+
return Token(EOF, comment=token)
|
| 419 |
+
elif self.multiline:
|
| 420 |
+
self.skip_whitespace()
|
| 421 |
+
token = ""
|
| 422 |
+
continue
|
| 423 |
+
else:
|
| 424 |
+
return Token(EOL, "\n", comment=token)
|
| 425 |
+
else:
|
| 426 |
+
# This code exists in case we ever want a
|
| 427 |
+
# delimiter to be returned. It never produces
|
| 428 |
+
# a token currently.
|
| 429 |
+
token = c
|
| 430 |
+
ttype = DELIMITER
|
| 431 |
+
else:
|
| 432 |
+
self._unget_char(c)
|
| 433 |
+
break
|
| 434 |
+
elif self.quoting and c == "\n":
|
| 435 |
+
raise dns.exception.SyntaxError("newline in quoted string")
|
| 436 |
+
elif c == "\\":
|
| 437 |
+
#
|
| 438 |
+
# It's an escape. Put it and the next character into
|
| 439 |
+
# the token; it will be checked later for goodness.
|
| 440 |
+
#
|
| 441 |
+
token += c
|
| 442 |
+
has_escape = True
|
| 443 |
+
c = self._get_char()
|
| 444 |
+
if c == "" or (c == "\n" and not self.quoting):
|
| 445 |
+
raise dns.exception.UnexpectedEnd
|
| 446 |
+
token += c
|
| 447 |
+
if token == "" and ttype != QUOTED_STRING:
|
| 448 |
+
if self.multiline:
|
| 449 |
+
raise dns.exception.SyntaxError("unbalanced parentheses")
|
| 450 |
+
ttype = EOF
|
| 451 |
+
return Token(ttype, token, has_escape)
|
| 452 |
+
|
| 453 |
+
def unget(self, token: Token) -> None:
|
| 454 |
+
"""Unget a token.
|
| 455 |
+
|
| 456 |
+
The unget buffer for tokens is only one token large; it is
|
| 457 |
+
an error to try to unget a token when the unget buffer is not
|
| 458 |
+
empty.
|
| 459 |
+
|
| 460 |
+
token: the token to unget
|
| 461 |
+
|
| 462 |
+
Raises UngetBufferFull: there is already an ungotten token
|
| 463 |
+
"""
|
| 464 |
+
|
| 465 |
+
if self.ungotten_token is not None:
|
| 466 |
+
raise UngetBufferFull
|
| 467 |
+
self.ungotten_token = token
|
| 468 |
+
|
| 469 |
+
def next(self):
|
| 470 |
+
"""Return the next item in an iteration.
|
| 471 |
+
|
| 472 |
+
Returns a Token.
|
| 473 |
+
"""
|
| 474 |
+
|
| 475 |
+
token = self.get()
|
| 476 |
+
if token.is_eof():
|
| 477 |
+
raise StopIteration
|
| 478 |
+
return token
|
| 479 |
+
|
| 480 |
+
__next__ = next
|
| 481 |
+
|
| 482 |
+
def __iter__(self):
|
| 483 |
+
return self
|
| 484 |
+
|
| 485 |
+
# Helpers
|
| 486 |
+
|
| 487 |
+
def get_int(self, base: int = 10) -> int:
|
| 488 |
+
"""Read the next token and interpret it as an unsigned integer.
|
| 489 |
+
|
| 490 |
+
Raises dns.exception.SyntaxError if not an unsigned integer.
|
| 491 |
+
|
| 492 |
+
Returns an int.
|
| 493 |
+
"""
|
| 494 |
+
|
| 495 |
+
token = self.get().unescape()
|
| 496 |
+
if not token.is_identifier():
|
| 497 |
+
raise dns.exception.SyntaxError("expecting an identifier")
|
| 498 |
+
if not token.value.isdigit():
|
| 499 |
+
raise dns.exception.SyntaxError("expecting an integer")
|
| 500 |
+
return int(token.value, base)
|
| 501 |
+
|
| 502 |
+
def get_uint8(self) -> int:
|
| 503 |
+
"""Read the next token and interpret it as an 8-bit unsigned
|
| 504 |
+
integer.
|
| 505 |
+
|
| 506 |
+
Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.
|
| 507 |
+
|
| 508 |
+
Returns an int.
|
| 509 |
+
"""
|
| 510 |
+
|
| 511 |
+
value = self.get_int()
|
| 512 |
+
if value < 0 or value > 255:
|
| 513 |
+
raise dns.exception.SyntaxError(
|
| 514 |
+
"%d is not an unsigned 8-bit integer" % value
|
| 515 |
+
)
|
| 516 |
+
return value
|
| 517 |
+
|
| 518 |
+
def get_uint16(self, base: int = 10) -> int:
|
| 519 |
+
"""Read the next token and interpret it as a 16-bit unsigned
|
| 520 |
+
integer.
|
| 521 |
+
|
| 522 |
+
Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.
|
| 523 |
+
|
| 524 |
+
Returns an int.
|
| 525 |
+
"""
|
| 526 |
+
|
| 527 |
+
value = self.get_int(base=base)
|
| 528 |
+
if value < 0 or value > 65535:
|
| 529 |
+
if base == 8:
|
| 530 |
+
raise dns.exception.SyntaxError(
|
| 531 |
+
f"{value:o} is not an octal unsigned 16-bit integer"
|
| 532 |
+
)
|
| 533 |
+
else:
|
| 534 |
+
raise dns.exception.SyntaxError(
|
| 535 |
+
"%d is not an unsigned 16-bit integer" % value
|
| 536 |
+
)
|
| 537 |
+
return value
|
| 538 |
+
|
| 539 |
+
def get_uint32(self, base: int = 10) -> int:
|
| 540 |
+
"""Read the next token and interpret it as a 32-bit unsigned
|
| 541 |
+
integer.
|
| 542 |
+
|
| 543 |
+
Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.
|
| 544 |
+
|
| 545 |
+
Returns an int.
|
| 546 |
+
"""
|
| 547 |
+
|
| 548 |
+
value = self.get_int(base=base)
|
| 549 |
+
if value < 0 or value > 4294967295:
|
| 550 |
+
raise dns.exception.SyntaxError(
|
| 551 |
+
"%d is not an unsigned 32-bit integer" % value
|
| 552 |
+
)
|
| 553 |
+
return value
|
| 554 |
+
|
| 555 |
+
def get_uint48(self, base: int = 10) -> int:
|
| 556 |
+
"""Read the next token and interpret it as a 48-bit unsigned
|
| 557 |
+
integer.
|
| 558 |
+
|
| 559 |
+
Raises dns.exception.SyntaxError if not a 48-bit unsigned integer.
|
| 560 |
+
|
| 561 |
+
Returns an int.
|
| 562 |
+
"""
|
| 563 |
+
|
| 564 |
+
value = self.get_int(base=base)
|
| 565 |
+
if value < 0 or value > 281474976710655:
|
| 566 |
+
raise dns.exception.SyntaxError(
|
| 567 |
+
"%d is not an unsigned 48-bit integer" % value
|
| 568 |
+
)
|
| 569 |
+
return value
|
| 570 |
+
|
| 571 |
+
def get_string(self, max_length: Optional[int] = None) -> str:
|
| 572 |
+
"""Read the next token and interpret it as a string.
|
| 573 |
+
|
| 574 |
+
Raises dns.exception.SyntaxError if not a string.
|
| 575 |
+
Raises dns.exception.SyntaxError if token value length
|
| 576 |
+
exceeds max_length (if specified).
|
| 577 |
+
|
| 578 |
+
Returns a string.
|
| 579 |
+
"""
|
| 580 |
+
|
| 581 |
+
token = self.get().unescape()
|
| 582 |
+
if not (token.is_identifier() or token.is_quoted_string()):
|
| 583 |
+
raise dns.exception.SyntaxError("expecting a string")
|
| 584 |
+
if max_length and len(token.value) > max_length:
|
| 585 |
+
raise dns.exception.SyntaxError("string too long")
|
| 586 |
+
return token.value
|
| 587 |
+
|
| 588 |
+
def get_identifier(self) -> str:
|
| 589 |
+
"""Read the next token, which should be an identifier.
|
| 590 |
+
|
| 591 |
+
Raises dns.exception.SyntaxError if not an identifier.
|
| 592 |
+
|
| 593 |
+
Returns a string.
|
| 594 |
+
"""
|
| 595 |
+
|
| 596 |
+
token = self.get().unescape()
|
| 597 |
+
if not token.is_identifier():
|
| 598 |
+
raise dns.exception.SyntaxError("expecting an identifier")
|
| 599 |
+
return token.value
|
| 600 |
+
|
| 601 |
+
def get_remaining(self, max_tokens: Optional[int] = None) -> List[Token]:
|
| 602 |
+
"""Return the remaining tokens on the line, until an EOL or EOF is seen.
|
| 603 |
+
|
| 604 |
+
max_tokens: If not None, stop after this number of tokens.
|
| 605 |
+
|
| 606 |
+
Returns a list of tokens.
|
| 607 |
+
"""
|
| 608 |
+
|
| 609 |
+
tokens = []
|
| 610 |
+
while True:
|
| 611 |
+
token = self.get()
|
| 612 |
+
if token.is_eol_or_eof():
|
| 613 |
+
self.unget(token)
|
| 614 |
+
break
|
| 615 |
+
tokens.append(token)
|
| 616 |
+
if len(tokens) == max_tokens:
|
| 617 |
+
break
|
| 618 |
+
return tokens
|
| 619 |
+
|
| 620 |
+
def concatenate_remaining_identifiers(self, allow_empty: bool = False) -> str:
|
| 621 |
+
"""Read the remaining tokens on the line, which should be identifiers.
|
| 622 |
+
|
| 623 |
+
Raises dns.exception.SyntaxError if there are no remaining tokens,
|
| 624 |
+
unless `allow_empty=True` is given.
|
| 625 |
+
|
| 626 |
+
Raises dns.exception.SyntaxError if a token is seen that is not an
|
| 627 |
+
identifier.
|
| 628 |
+
|
| 629 |
+
Returns a string containing a concatenation of the remaining
|
| 630 |
+
identifiers.
|
| 631 |
+
"""
|
| 632 |
+
s = ""
|
| 633 |
+
while True:
|
| 634 |
+
token = self.get().unescape()
|
| 635 |
+
if token.is_eol_or_eof():
|
| 636 |
+
self.unget(token)
|
| 637 |
+
break
|
| 638 |
+
if not token.is_identifier():
|
| 639 |
+
raise dns.exception.SyntaxError
|
| 640 |
+
s += token.value
|
| 641 |
+
if not (allow_empty or s):
|
| 642 |
+
raise dns.exception.SyntaxError("expecting another identifier")
|
| 643 |
+
return s
|
| 644 |
+
|
| 645 |
+
def as_name(
|
| 646 |
+
self,
|
| 647 |
+
token: Token,
|
| 648 |
+
origin: Optional[dns.name.Name] = None,
|
| 649 |
+
relativize: bool = False,
|
| 650 |
+
relativize_to: Optional[dns.name.Name] = None,
|
| 651 |
+
) -> dns.name.Name:
|
| 652 |
+
"""Try to interpret the token as a DNS name.
|
| 653 |
+
|
| 654 |
+
Raises dns.exception.SyntaxError if not a name.
|
| 655 |
+
|
| 656 |
+
Returns a dns.name.Name.
|
| 657 |
+
"""
|
| 658 |
+
if not token.is_identifier():
|
| 659 |
+
raise dns.exception.SyntaxError("expecting an identifier")
|
| 660 |
+
name = dns.name.from_text(token.value, origin, self.idna_codec)
|
| 661 |
+
return name.choose_relativity(relativize_to or origin, relativize)
|
| 662 |
+
|
| 663 |
+
def get_name(
|
| 664 |
+
self,
|
| 665 |
+
origin: Optional[dns.name.Name] = None,
|
| 666 |
+
relativize: bool = False,
|
| 667 |
+
relativize_to: Optional[dns.name.Name] = None,
|
| 668 |
+
) -> dns.name.Name:
|
| 669 |
+
"""Read the next token and interpret it as a DNS name.
|
| 670 |
+
|
| 671 |
+
Raises dns.exception.SyntaxError if not a name.
|
| 672 |
+
|
| 673 |
+
Returns a dns.name.Name.
|
| 674 |
+
"""
|
| 675 |
+
|
| 676 |
+
token = self.get()
|
| 677 |
+
return self.as_name(token, origin, relativize, relativize_to)
|
| 678 |
+
|
| 679 |
+
def get_eol_as_token(self) -> Token:
|
| 680 |
+
"""Read the next token and raise an exception if it isn't EOL or
|
| 681 |
+
EOF.
|
| 682 |
+
|
| 683 |
+
Returns a string.
|
| 684 |
+
"""
|
| 685 |
+
|
| 686 |
+
token = self.get()
|
| 687 |
+
if not token.is_eol_or_eof():
|
| 688 |
+
raise dns.exception.SyntaxError(
|
| 689 |
+
'expected EOL or EOF, got %d "%s"' % (token.ttype, token.value)
|
| 690 |
+
)
|
| 691 |
+
return token
|
| 692 |
+
|
| 693 |
+
def get_eol(self) -> str:
|
| 694 |
+
return self.get_eol_as_token().value
|
| 695 |
+
|
| 696 |
+
def get_ttl(self) -> int:
|
| 697 |
+
"""Read the next token and interpret it as a DNS TTL.
|
| 698 |
+
|
| 699 |
+
Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
|
| 700 |
+
identifier or badly formed.
|
| 701 |
+
|
| 702 |
+
Returns an int.
|
| 703 |
+
"""
|
| 704 |
+
|
| 705 |
+
token = self.get().unescape()
|
| 706 |
+
if not token.is_identifier():
|
| 707 |
+
raise dns.exception.SyntaxError("expecting an identifier")
|
| 708 |
+
return dns.ttl.from_text(token.value)
|
vllm/lib/python3.10/site-packages/dns/ttl.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
|
| 2 |
+
|
| 3 |
+
# Copyright (C) 2003-2017 Nominum, Inc.
|
| 4 |
+
#
|
| 5 |
+
# Permission to use, copy, modify, and distribute this software and its
|
| 6 |
+
# documentation for any purpose with or without fee is hereby granted,
|
| 7 |
+
# provided that the above copyright notice and this permission notice
|
| 8 |
+
# appear in all copies.
|
| 9 |
+
#
|
| 10 |
+
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
| 11 |
+
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
| 12 |
+
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
| 13 |
+
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
| 14 |
+
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
| 15 |
+
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
| 16 |
+
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 17 |
+
|
| 18 |
+
"""DNS TTL conversion."""
|
| 19 |
+
|
| 20 |
+
from typing import Union
|
| 21 |
+
|
| 22 |
+
import dns.exception
|
| 23 |
+
|
| 24 |
+
# Technically TTLs are supposed to be between 0 and 2**31 - 1, with values
|
| 25 |
+
# greater than that interpreted as 0, but we do not impose this policy here
|
| 26 |
+
# as values > 2**31 - 1 occur in real world data.
|
| 27 |
+
#
|
| 28 |
+
# We leave it to applications to impose tighter bounds if desired.
|
| 29 |
+
MAX_TTL = 2**32 - 1
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class BadTTL(dns.exception.SyntaxError):
|
| 33 |
+
"""DNS TTL value is not well-formed."""
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def from_text(text: str) -> int:
|
| 37 |
+
"""Convert the text form of a TTL to an integer.
|
| 38 |
+
|
| 39 |
+
The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
|
| 40 |
+
|
| 41 |
+
*text*, a ``str``, the textual TTL.
|
| 42 |
+
|
| 43 |
+
Raises ``dns.ttl.BadTTL`` if the TTL is not well-formed.
|
| 44 |
+
|
| 45 |
+
Returns an ``int``.
|
| 46 |
+
"""
|
| 47 |
+
|
| 48 |
+
if text.isdigit():
|
| 49 |
+
total = int(text)
|
| 50 |
+
elif len(text) == 0:
|
| 51 |
+
raise BadTTL
|
| 52 |
+
else:
|
| 53 |
+
total = 0
|
| 54 |
+
current = 0
|
| 55 |
+
need_digit = True
|
| 56 |
+
for c in text:
|
| 57 |
+
if c.isdigit():
|
| 58 |
+
current *= 10
|
| 59 |
+
current += int(c)
|
| 60 |
+
need_digit = False
|
| 61 |
+
else:
|
| 62 |
+
if need_digit:
|
| 63 |
+
raise BadTTL
|
| 64 |
+
c = c.lower()
|
| 65 |
+
if c == "w":
|
| 66 |
+
total += current * 604800
|
| 67 |
+
elif c == "d":
|
| 68 |
+
total += current * 86400
|
| 69 |
+
elif c == "h":
|
| 70 |
+
total += current * 3600
|
| 71 |
+
elif c == "m":
|
| 72 |
+
total += current * 60
|
| 73 |
+
elif c == "s":
|
| 74 |
+
total += current
|
| 75 |
+
else:
|
| 76 |
+
raise BadTTL(f"unknown unit '{c}'")
|
| 77 |
+
current = 0
|
| 78 |
+
need_digit = True
|
| 79 |
+
if not current == 0:
|
| 80 |
+
raise BadTTL("trailing integer")
|
| 81 |
+
if total < 0 or total > MAX_TTL:
|
| 82 |
+
raise BadTTL("TTL should be between 0 and 2**32 - 1 (inclusive)")
|
| 83 |
+
return total
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def make(value: Union[int, str]) -> int:
|
| 87 |
+
if isinstance(value, int):
|
| 88 |
+
return value
|
| 89 |
+
elif isinstance(value, str):
|
| 90 |
+
return dns.ttl.from_text(value)
|
| 91 |
+
else:
|
| 92 |
+
raise ValueError("cannot convert value to TTL")
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.75 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/_api.cpython-310.pyc
ADDED
|
Binary file (3.31 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/_exceptions.cpython-310.pyc
ADDED
|
Binary file (2.37 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/_ssl.cpython-310.pyc
ADDED
|
Binary file (423 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/_synchronization.cpython-310.pyc
ADDED
|
Binary file (9.44 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (909 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__init__.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .connection import AsyncHTTPConnection
|
| 2 |
+
from .connection_pool import AsyncConnectionPool
|
| 3 |
+
from .http11 import AsyncHTTP11Connection
|
| 4 |
+
from .http_proxy import AsyncHTTPProxy
|
| 5 |
+
from .interfaces import AsyncConnectionInterface
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from .http2 import AsyncHTTP2Connection
|
| 9 |
+
except ImportError: # pragma: nocover
|
| 10 |
+
|
| 11 |
+
class AsyncHTTP2Connection:  # type: ignore
    """Stand-in used when the optional `h2` dependency is not installed.

    Any attempt to construct it fails immediately with an actionable
    error message, rather than failing later with an obscure ImportError.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        message = (
            "Attempted to use http2 support, but the `h2` package is not "
            "installed. Use 'pip install httpcore[http2]'."
        )
        raise RuntimeError(message)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
try:
|
| 20 |
+
from .socks_proxy import AsyncSOCKSProxy
|
| 21 |
+
except ImportError: # pragma: nocover
|
| 22 |
+
|
| 23 |
+
class AsyncSOCKSProxy:  # type: ignore
    """Stand-in used when the optional `socksio` dependency is not installed.

    Constructing it always raises, pointing the user at the extra that
    provides SOCKS support.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        message = (
            "Attempted to use SOCKS support, but the `socksio` package is not "
            "installed. Use 'pip install httpcore[socks]'."
        )
        raise RuntimeError(message)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# Public names re-exported by httpcore._async.  Note that the HTTP/2 and
# SOCKS entries may resolve to the RuntimeError-raising stand-in classes
# when their optional dependencies (`h2`, `socksio`) are absent.
__all__ = [
    "AsyncHTTPConnection",
    "AsyncConnectionPool",
    "AsyncHTTPProxy",
    "AsyncHTTP11Connection",
    "AsyncHTTP2Connection",
    "AsyncConnectionInterface",
    "AsyncSOCKSProxy",
]
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection.cpython-310.pyc
ADDED
|
Binary file (6.81 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http11.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http2.cpython-310.pyc
ADDED
|
Binary file (16.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-310.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/interfaces.cpython-310.pyc
ADDED
|
Binary file (4.47 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/httpcore/_async/connection.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
import logging
|
| 5 |
+
import ssl
|
| 6 |
+
import types
|
| 7 |
+
import typing
|
| 8 |
+
|
| 9 |
+
from .._backends.auto import AutoBackend
|
| 10 |
+
from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
|
| 11 |
+
from .._exceptions import ConnectError, ConnectTimeout
|
| 12 |
+
from .._models import Origin, Request, Response
|
| 13 |
+
from .._ssl import default_ssl_context
|
| 14 |
+
from .._synchronization import AsyncLock
|
| 15 |
+
from .._trace import Trace
|
| 16 |
+
from .http11 import AsyncHTTP11Connection
|
| 17 |
+
from .interfaces import AsyncConnectionInterface
|
| 18 |
+
|
| 19 |
+
# Base factor fed to exponential_backoff() when retrying failed connects.
RETRIES_BACKOFF_FACTOR = 0.5  # 0s, 0.5s, 1s, 2s, 4s, etc.


# Module-level logger used by the Trace spans below ("connect_tcp",
# "start_tls", "retry", "close", ...).
logger = logging.getLogger("httpcore.connection")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def exponential_backoff(factor: float) -> typing.Iterator[float]:
    """
    Generate a geometric sequence that has a ratio of 2 and starts with 0.

    For example:
    - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...`
    - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...`
    """
    # First retry happens immediately, then delays double each time.
    yield 0
    delay = factor
    while True:
        yield delay
        delay *= 2
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class AsyncHTTPConnection(AsyncConnectionInterface):
    """A single async connection to one origin.

    Lazily establishes the underlying network stream on the first request,
    then delegates to either an HTTP/1.1 or HTTP/2 implementation depending
    on configuration and ALPN negotiation.  Connect failures are retried
    (up to ``retries`` times) with exponential backoff.
    """

    def __init__(
        self,
        origin: Origin,
        ssl_context: ssl.SSLContext | None = None,
        keepalive_expiry: float | None = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        local_address: str | None = None,
        uds: str | None = None,
        network_backend: AsyncNetworkBackend | None = None,
        socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
    ) -> None:
        self._origin = origin
        self._ssl_context = ssl_context
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2
        self._retries = retries
        self._local_address = local_address
        self._uds = uds

        # Default to the AutoBackend (picks asyncio/trio at runtime) unless
        # an explicit backend was injected (e.g. a mock backend in tests).
        self._network_backend: AsyncNetworkBackend = (
            AutoBackend() if network_backend is None else network_backend
        )
        # The concrete HTTP/1.1 or HTTP/2 connection, created on first use.
        self._connection: AsyncConnectionInterface | None = None
        self._connect_failed: bool = False
        # Serializes connection establishment across concurrent requests.
        self._request_lock = AsyncLock()
        self._socket_options = socket_options

    async def handle_async_request(self, request: Request) -> Response:
        """Send a request, establishing the connection first if needed.

        Raises RuntimeError if the request's origin does not match this
        connection's origin.
        """
        if not self.can_handle_request(request.url.origin):
            raise RuntimeError(
                f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
            )

        try:
            # Hold the lock while connecting so only one coroutine creates
            # the underlying connection.
            async with self._request_lock:
                if self._connection is None:
                    stream = await self._connect(request)

                    # Prefer HTTP/2 when ALPN negotiated "h2", or when the
                    # caller enabled http2 and disabled http1 outright.
                    ssl_object = stream.get_extra_info("ssl_object")
                    http2_negotiated = (
                        ssl_object is not None
                        and ssl_object.selected_alpn_protocol() == "h2"
                    )
                    if http2_negotiated or (self._http2 and not self._http1):
                        from .http2 import AsyncHTTP2Connection

                        self._connection = AsyncHTTP2Connection(
                            origin=self._origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
                    else:
                        self._connection = AsyncHTTP11Connection(
                            origin=self._origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
        except BaseException as exc:
            # Record the failure so is_available()/has_expired() etc. report
            # this connection as unusable, then re-raise for the caller.
            self._connect_failed = True
            raise exc

        return await self._connection.handle_async_request(request)

    async def _connect(self, request: Request) -> AsyncNetworkStream:
        """Open the TCP or UNIX-socket stream (with TLS if required).

        Retries ConnectError/ConnectTimeout up to ``self._retries`` times,
        sleeping per the exponential backoff schedule between attempts.
        """
        timeouts = request.extensions.get("timeout", {})
        sni_hostname = request.extensions.get("sni_hostname", None)
        timeout = timeouts.get("connect", None)

        retries_left = self._retries
        delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)

        while True:
            try:
                if self._uds is None:
                    # Plain TCP connect to host:port.
                    kwargs = {
                        "host": self._origin.host.decode("ascii"),
                        "port": self._origin.port,
                        "local_address": self._local_address,
                        "timeout": timeout,
                        "socket_options": self._socket_options,
                    }
                    async with Trace("connect_tcp", logger, request, kwargs) as trace:
                        stream = await self._network_backend.connect_tcp(**kwargs)
                        trace.return_value = stream
                else:
                    # UNIX domain socket connect.
                    kwargs = {
                        "path": self._uds,
                        "timeout": timeout,
                        "socket_options": self._socket_options,
                    }
                    async with Trace(
                        "connect_unix_socket", logger, request, kwargs
                    ) as trace:
                        stream = await self._network_backend.connect_unix_socket(
                            **kwargs
                        )
                        trace.return_value = stream

                if self._origin.scheme in (b"https", b"wss"):
                    # Upgrade the stream to TLS.  Only advertise "h2" via
                    # ALPN when HTTP/2 has been enabled.
                    ssl_context = (
                        default_ssl_context()
                        if self._ssl_context is None
                        else self._ssl_context
                    )
                    alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                    ssl_context.set_alpn_protocols(alpn_protocols)

                    kwargs = {
                        "ssl_context": ssl_context,
                        # SNI falls back to the origin host when not given
                        # explicitly via the "sni_hostname" extension.
                        "server_hostname": sni_hostname
                        or self._origin.host.decode("ascii"),
                        "timeout": timeout,
                    }
                    async with Trace("start_tls", logger, request, kwargs) as trace:
                        stream = await stream.start_tls(**kwargs)
                        trace.return_value = stream
                return stream
            except (ConnectError, ConnectTimeout):
                if retries_left <= 0:
                    raise
                retries_left -= 1
                delay = next(delays)
                async with Trace("retry", logger, request, kwargs) as trace:
                    await self._network_backend.sleep(delay)

    def can_handle_request(self, origin: Origin) -> bool:
        # One connection instance only ever serves a single origin.
        return origin == self._origin

    async def aclose(self) -> None:
        """Close the underlying connection, if one was established."""
        if self._connection is not None:
            async with Trace("close", logger, None, {}):
                await self._connection.aclose()

    def is_available(self) -> bool:
        if self._connection is None:
            # If HTTP/2 support is enabled, and the resulting connection could
            # end up as HTTP/2 then we should indicate the connection as being
            # available to service multiple requests.
            return (
                self._http2
                and (self._origin.scheme == b"https" or not self._http1)
                and not self._connect_failed
            )
        return self._connection.is_available()

    def has_expired(self) -> bool:
        # Before a connection exists, "expired" mirrors connect failure.
        if self._connection is None:
            return self._connect_failed
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        if self._connection is None:
            return self._connect_failed
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        if self._connection is None:
            return self._connect_failed
        return self._connection.is_closed()

    def info(self) -> str:
        """Human-readable connection state, used by __repr__ and pool info."""
        if self._connection is None:
            return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
        return self._connection.info()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"

    # These context managers are not used in the standard flow, but are
    # useful for testing or working with connection instances directly.

    async def __aenter__(self) -> AsyncHTTPConnection:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None = None,
        exc_value: BaseException | None = None,
        traceback: types.TracebackType | None = None,
    ) -> None:
        await self.aclose()
|