ZTWHHH commited on
Commit
94b06b5
·
verified ·
1 Parent(s): a8553ce

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/INSTALLER +1 -0
  3. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/METADATA +206 -0
  4. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/NOTICE.md +3 -0
  5. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD +99 -0
  6. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/WHEEL +5 -0
  7. evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt +1 -0
  8. evalkit_internvl/lib/python3.10/site-packages/httpcore/__init__.py +139 -0
  9. evalkit_internvl/lib/python3.10/site-packages/httpcore/_api.py +92 -0
  10. evalkit_internvl/lib/python3.10/site-packages/httpcore/_exceptions.py +81 -0
  11. evalkit_internvl/lib/python3.10/site-packages/httpcore/_models.py +483 -0
  12. evalkit_internvl/lib/python3.10/site-packages/httpcore/_ssl.py +9 -0
  13. evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__init__.py +39 -0
  14. evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py +356 -0
  15. evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/interfaces.py +135 -0
  16. evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/socks_proxy.py +340 -0
  17. evalkit_internvl/lib/python3.10/site-packages/httpcore/_synchronization.py +279 -0
  18. evalkit_internvl/lib/python3.10/site-packages/httpcore/_trace.py +105 -0
  19. evalkit_internvl/lib/python3.10/site-packages/httpcore/_utils.py +36 -0
  20. evalkit_internvl/lib/python3.10/site-packages/httpcore/py.typed +0 -0
  21. evalkit_internvl/lib/python3.10/site-packages/pexpect/ANSI.py +351 -0
  22. evalkit_internvl/lib/python3.10/site-packages/pexpect/FSM.py +334 -0
  23. evalkit_internvl/lib/python3.10/site-packages/pexpect/__init__.py +91 -0
  24. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/ANSI.cpython-310.pyc +0 -0
  25. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/FSM.cpython-310.pyc +0 -0
  26. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async_pre_await.cpython-310.pyc +0 -0
  27. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async_w_await.cpython-310.pyc +0 -0
  28. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/exceptions.cpython-310.pyc +0 -0
  29. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/pty_spawn.cpython-310.pyc +0 -0
  30. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/pxssh.cpython-310.pyc +0 -0
  31. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/replwrap.cpython-310.pyc +0 -0
  32. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/screen.cpython-310.pyc +0 -0
  33. evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/spawnbase.cpython-310.pyc +0 -0
  34. evalkit_internvl/lib/python3.10/site-packages/pexpect/_async.py +28 -0
  35. evalkit_internvl/lib/python3.10/site-packages/pexpect/_async_pre_await.py +111 -0
  36. evalkit_internvl/lib/python3.10/site-packages/pexpect/_async_w_await.py +118 -0
  37. evalkit_internvl/lib/python3.10/site-packages/pexpect/bashrc.sh +18 -0
  38. evalkit_internvl/lib/python3.10/site-packages/pexpect/exceptions.py +35 -0
  39. evalkit_internvl/lib/python3.10/site-packages/pexpect/expect.py +371 -0
  40. evalkit_internvl/lib/python3.10/site-packages/pexpect/fdpexpect.py +152 -0
  41. evalkit_internvl/lib/python3.10/site-packages/pexpect/popen_spawn.py +188 -0
  42. evalkit_internvl/lib/python3.10/site-packages/pexpect/pty_spawn.py +860 -0
  43. evalkit_internvl/lib/python3.10/site-packages/pexpect/pxssh.py +540 -0
  44. evalkit_internvl/lib/python3.10/site-packages/pexpect/replwrap.py +136 -0
  45. evalkit_internvl/lib/python3.10/site-packages/pexpect/run.py +157 -0
  46. evalkit_internvl/lib/python3.10/site-packages/pexpect/screen.py +431 -0
  47. evalkit_internvl/lib/python3.10/site-packages/pexpect/socket_pexpect.py +145 -0
  48. evalkit_internvl/lib/python3.10/site-packages/pexpect/spawnbase.py +536 -0
  49. evalkit_internvl/lib/python3.10/site-packages/pexpect/utils.py +187 -0
  50. evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc +3 -0
.gitattributes CHANGED
@@ -1643,3 +1643,4 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/ode.
1643
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1644
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1645
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1643
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1644
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1645
  evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1646
+ evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/METADATA ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: bitsandbytes
3
+ Version: 0.41.0
4
+ Summary: k-bit optimizers and matrix multiplication routines.
5
+ Home-page: https://github.com/TimDettmers/bitsandbytes
6
+ Author: Tim Dettmers
7
+ Author-email: dettmers@cs.washington.edu
8
+ License: MIT
9
+ Keywords: gpu optimizers optimization 8-bit quantization compression
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ License-File: NOTICE.md
15
+
16
+ # bitsandbytes
17
+
18
+ The bitsandbytes is a lightweight wrapper around CUDA custom functions, in particular 8-bit optimizers, matrix multiplication (LLM.int8()), and quantization functions.
19
+
20
+
21
+
22
+ Resources:
23
+ - [8-bit Optimizer Paper](https://arxiv.org/abs/2110.02861) -- [Video](https://www.youtube.com/watch?v=IxrlHAJtqKE) -- [Docs](https://bitsandbytes.readthedocs.io/en/latest/)
24
+
25
+ - [LLM.int8() Paper](https://arxiv.org/abs/2208.07339) -- [LLM.int8() Software Blog Post](https://huggingface.co/blog/hf-bitsandbytes-integration) -- [LLM.int8() Emergent Features Blog Post](https://timdettmers.com/2022/08/17/llm-int8-and-emergent-features/)
26
+
27
+ ## TL;DR
28
+ **Requirements**
29
+ Python >=3.8. Linux distribution (Ubuntu, MacOS, etc.) + CUDA > 10.0.
30
+
31
+ (Deprecated: CUDA 10.0 is deprecated and only CUDA >= 11.0) will be supported with release 0.39.0)
32
+
33
+ **Installation**:
34
+
35
+ ``pip install bitsandbytes``
36
+
37
+ In some cases it can happen that you need to compile from source. If this happens please consider submitting a bug report with `python -m bitsandbytes` information. What now follows is some short instructions which might work out of the box if `nvcc` is installed. If these do not work see further below.
38
+
39
+ Compilation quickstart:
40
+ ```bash
41
+ git clone https://github.com/timdettmers/bitsandbytes.git
42
+ cd bitsandbytes
43
+
44
+ # CUDA_VERSIONS in {110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 120}
45
+ # make argument in {cuda110, cuda11x, cuda12x}
46
+ # if you do not know what CUDA you have, try looking at the output of: python -m bitsandbytes
47
+ CUDA_VERSION=117 make cuda11x
48
+ python setup.py install
49
+ ```
50
+
51
+ **Using Int8 inference with HuggingFace Transformers**
52
+
53
+ ```python
54
+ from transformers import AutoModelForCausalLM
55
+ model = AutoModelForCausalLM.from_pretrained(
56
+ 'decapoda-research/llama-7b-hf,
57
+ device_map='auto',
58
+ load_in_8bit=True,
59
+ max_memory=f'{int(torch.cuda.mem_get_info()[0]/1024**3)-2}GB')
60
+ ```
61
+
62
+ A more detailed example, can be found in [examples/int8_inference_huggingface.py](examples/int8_inference_huggingface.py).
63
+
64
+ **Using 8-bit optimizer**:
65
+ 1. Comment out optimizer: ``#torch.optim.Adam(....)``
66
+ 2. Add 8-bit optimizer of your choice ``bnb.optim.Adam8bit(....)`` (arguments stay the same)
67
+ 3. Replace embedding layer if necessary: ``torch.nn.Embedding(..) -> bnb.nn.Embedding(..)``
68
+
69
+
70
+ **Using 8-bit Inference**:
71
+ 1. Comment out torch.nn.Linear: ``#linear = torch.nn.Linear(...)``
72
+ 2. Add bnb 8-bit linear light module: ``linear = bnb.nn.Linear8bitLt(...)`` (base arguments stay the same)
73
+ 3. There are two modes:
74
+ - Mixed 8-bit training with 16-bit main weights. Pass the argument ``has_fp16_weights=True`` (default)
75
+ - Int8 inference. Pass the argument ``has_fp16_weights=False``
76
+ 4. To use the full LLM.int8() method, use the ``threshold=k`` argument. We recommend ``k=6.0``.
77
+ ```python
78
+ # LLM.int8()
79
+ linear = bnb.nn.Linear8bitLt(dim1, dim2, bias=True, has_fp16_weights=False, threshold=6.0)
80
+ # inputs need to be fp16
81
+ out = linear(x.to(torch.float16))
82
+ ```
83
+
84
+
85
+ ## Features
86
+ - 8-bit Matrix multiplication with mixed precision decomposition
87
+ - LLM.int8() inference
88
+ - 8-bit Optimizers: Adam, AdamW, RMSProp, LARS, LAMB, Lion (saves 75% memory)
89
+ - Stable Embedding Layer: Improved stability through better initialization, and normalization
90
+ - 8-bit quantization: Quantile, Linear, and Dynamic quantization
91
+ - Fast quantile estimation: Up to 100x faster than other algorithms
92
+
93
+ ## Requirements & Installation
94
+
95
+ Requirements: anaconda, cudatoolkit, pytorch
96
+
97
+ Hardware requirements:
98
+ - LLM.int8(): NVIDIA Turing (RTX 20xx; T4) or Ampere GPU (RTX 30xx; A4-A100); (a GPU from 2018 or older).
99
+ - 8-bit optimizers and quantization: NVIDIA Kepler GPU or newer (>=GTX 78X).
100
+
101
+ Supported CUDA versions: 10.2 - 12.0
102
+
103
+ The bitsandbytes library is currently only supported on Linux distributions. Windows is not supported at the moment.
104
+
105
+ The requirements can best be fulfilled by installing pytorch via anaconda. You can install PyTorch by following the ["Get Started"](https://pytorch.org/get-started/locally/) instructions on the official website.
106
+
107
+ To install run:
108
+
109
+ ``pip install bitsandbytes``
110
+
111
+ ## Using bitsandbytes
112
+
113
+ ### Using Int8 Matrix Multiplication
114
+
115
+ For straight Int8 matrix multiplication with mixed precision decomposition you can use ``bnb.matmul(...)``. To enable mixed precision decomposition, use the threshold parameter:
116
+ ```python
117
+ bnb.matmul(..., threshold=6.0)
118
+ ```
119
+
120
+ For instructions how to use LLM.int8() inference layers in your own code, see the TL;DR above or for extended instruction see [this blog post](https://github.com/huggingface/transformers).
121
+
122
+ ### Using the 8-bit Optimizers
123
+
124
+ With bitsandbytes 8-bit optimizers can be used by changing a single line of code in your codebase. For NLP models we recommend also to use the StableEmbedding layers (see below) which improves results and helps with stable 8-bit optimization. To get started with 8-bit optimizers, it is sufficient to replace your old optimizer with the 8-bit optimizer in the following way:
125
+ ```python
126
+ import bitsandbytes as bnb
127
+
128
+ # adam = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.995)) # comment out old optimizer
129
+ adam = bnb.optim.Adam8bit(model.parameters(), lr=0.001, betas=(0.9, 0.995)) # add bnb optimizer
130
+ adam = bnb.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.995), optim_bits=8) # equivalent
131
+
132
+
133
+ torch.nn.Embedding(...) -> bnb.nn.StableEmbedding(...) # recommended for NLP models
134
+ ```
135
+
136
+ Note that by default all parameter tensors with less than 4096 elements are kept at 32-bit even if you initialize those parameters with 8-bit optimizers. This is done since such small tensors do not save much memory and often contain highly variable parameters (biases) or parameters that require high precision (batch norm, layer norm). You can change this behavior like so:
137
+ ```
138
+ # parameter tensors with less than 16384 values are optimized in 32-bit
139
+ # it is recommended to use multiplies of 4096
140
+ adam = bnb.optim.Adam8bit(model.parameters(), min_8bit_size=16384)
141
+ ```
142
+
143
+ ### Change Bits and other Hyperparameters for Individual Parameters
144
+
145
+ If you want to optimize some unstable parameters with 32-bit Adam and others with 8-bit Adam, you can use the `GlobalOptimManager`. With this, we can also configure specific hyperparameters for particular layers, such as embedding layers. To do that, we need two things: (1) register the parameter while they are still on the CPU, (2) override the config with the new desired hyperparameters (anytime, anywhere). See our [guide](howto_config_override.md) for more details
146
+
147
+ ### Fairseq Users
148
+
149
+ To use the Stable Embedding Layer, override the respective `build_embedding(...)` function of your model. Make sure to also use the `--no-scale-embedding` flag to disable scaling of the word embedding layer (nor replaced with layer norm). You can use the optimizers by replacing the optimizer in the respective file (`adam.py` etc.).
150
+
151
+ ## Release and Feature History
152
+
153
+ For upcoming features and changes and full history see [Patch Notes](CHANGELOG.md).
154
+
155
+ ## Errors
156
+
157
+ 1. RuntimeError: CUDA error: no kernel image is available for execution on the device. [Solution](errors_and_solutions.md#No-kernel-image-available)
158
+ 2. __fatbinwrap_.. [Solution](errors_and_solutions.md#fatbinwrap_)
159
+
160
+ ## Compile from source
161
+ To compile from source, you need an installation of CUDA. If `nvcc` is not installed, you can install the CUDA Toolkit with nvcc through the following commands.
162
+
163
+ ```bash
164
+ wget https://raw.githubusercontent.com/TimDettmers/bitsandbytes/main/cuda_install.sh
165
+ # Syntax cuda_install CUDA_VERSION INSTALL_PREFIX EXPORT_TO_BASH
166
+ # CUDA_VERSION in {110, 111, 112, 113, 114, 115, 116, 117, 118, 120, 121}
167
+ # EXPORT_TO_BASH in {0, 1} with 0=False and 1=True
168
+
169
+ # For example, the following installs CUDA 11.8 to ~/local/cuda-11.8 and exports the path to your .bashrc
170
+ bash cuda install 118 ~/local 1
171
+ ```
172
+
173
+ To use a specific CUDA version just for a single compile run, you can set the variable `CUDA_HOME`, for example the following command compiles `libbitsandbytes_cuda117.so` using compiler flags for cuda11x with the cuda version at `~/local/cuda-11.7`:
174
+
175
+ ``CUDA_HOME=~/local/cuda-11.7 CUDA_VERSION=117 make cuda11x``
176
+
177
+ For more detailed instruction, please follow the [compile_from_source.md](compile_from_source.md) instructions.
178
+
179
+ ## License
180
+
181
+ The majority of bitsandbytes is licensed under MIT, however portions of the project are available under separate license terms: Pytorch is licensed under the BSD license.
182
+
183
+ We thank Fabio Cannizzo for his work on [FastBinarySearch](https://github.com/fabiocannizzo/FastBinarySearch) which we use for CPU quantization.
184
+
185
+ ## How to cite us
186
+ If you found this library and found LLM.int8() useful, please consider citing our work:
187
+
188
+ ```bibtex
189
+ @article{dettmers2022llmint8,
190
+ title={LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale},
191
+ author={Dettmers, Tim and Lewis, Mike and Belkada, Younes and Zettlemoyer, Luke},
192
+ journal={arXiv preprint arXiv:2208.07339},
193
+ year={2022}
194
+ }
195
+ ```
196
+
197
+ For 8-bit optimizers or quantization routines, please consider citing the following work:
198
+
199
+ ```bibtex
200
+ @article{dettmers2022optimizers,
201
+ title={8-bit Optimizers via Block-wise Quantization},
202
+ author={Dettmers, Tim and Lewis, Mike and Shleifer, Sam and Zettlemoyer, Luke},
203
+ journal={9th International Conference on Learning Representations, ICLR},
204
+ year={2022}
205
+ }
206
+ ```
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/NOTICE.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ The majority of bitsandbytes is licensed under MIT, however portions of the project are available under separate license terms: Pytorch is licensed under the BSD license.
2
+
3
+ We thank Fabio Cannizzo for this work on FastBinarySearch which is included in this project.
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/RECORD ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ bitsandbytes-0.41.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ bitsandbytes-0.41.0.dist-info/LICENSE,sha256=UkEte8fOQVfqYou6rLiCngqcs8WPV_mRdhJryM8r_IU,1086
3
+ bitsandbytes-0.41.0.dist-info/METADATA,sha256=z88wKooZxLJ9Z5T3i4YEWBIzRKR9o3DZIes663fhUu4,9810
4
+ bitsandbytes-0.41.0.dist-info/NOTICE.md,sha256=_4zDL2L8BqUwtmvoznR_wqhQmsP2QwdXHrAHnBMzAl8,265
5
+ bitsandbytes-0.41.0.dist-info/RECORD,,
6
+ bitsandbytes-0.41.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ bitsandbytes-0.41.0.dist-info/WHEEL,sha256=AtBG6SXL3KF_v0NxLf0ehyVOh0cold-JbJYXNGorC6Q,92
8
+ bitsandbytes-0.41.0.dist-info/top_level.txt,sha256=bK-Zzu-JyIIh4njm8jTYcbuqX-Z80XTcDal4lXCG0-M,13
9
+ bitsandbytes/__init__.py,sha256=mQQknbw8xSpKDtEJgVEiyCemE4HaB-FtAddxY2-Uyhc,670
10
+ bitsandbytes/__main__.py,sha256=rWjs6LsifG_Vglj3WM4brY2IOCjwKpAjuBP3OIzYFPU,4014
11
+ bitsandbytes/__pycache__/__init__.cpython-310.pyc,,
12
+ bitsandbytes/__pycache__/__main__.cpython-310.pyc,,
13
+ bitsandbytes/__pycache__/cextension.cpython-310.pyc,,
14
+ bitsandbytes/__pycache__/functional.cpython-310.pyc,,
15
+ bitsandbytes/__pycache__/utils.cpython-310.pyc,,
16
+ bitsandbytes/autograd/__init__.py,sha256=Ltb59FJrcWYVsTfGW6SscEZtiDhHZe7EFrYnIhnASug,67
17
+ bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc,,
18
+ bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc,,
19
+ bitsandbytes/autograd/_functions.py,sha256=ER9xwzolX9T32Xu0VFbvpoRdDiCas1neEaKOZARI2Kw,22361
20
+ bitsandbytes/cextension.py,sha256=klJwL-8ZPylUOETDTW-fvUbZ_Bt_rdB6wRDND1fB_wk,1635
21
+ bitsandbytes/cuda_setup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
+ bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc,,
23
+ bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc,,
24
+ bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc,,
25
+ bitsandbytes/cuda_setup/env_vars.py,sha256=4T8i0LKAbE6tyDceGbJxdW1o4Nm4_vDLY6br39VwCxc,1614
26
+ bitsandbytes/cuda_setup/main.py,sha256=o9YcJj87_t1yADdrMWY0c_XQRyX_8t3XGjwiERKtaVk,17946
27
+ bitsandbytes/functional.py,sha256=vw-RE4CfEirCvM-O8rsiGKvAGIM5cKWNM0Ekbr8-xXc,79598
28
+ bitsandbytes/libbitsandbytes_cpu.so,sha256=nejNfivapxN6MN_bJxFfR423YImIeqNVhXdts2BcDR8,41608
29
+ bitsandbytes/libbitsandbytes_cuda110.so,sha256=1NM_-9xHfCz2djWods0YXQcDKITkX3KSJfklrUESkKw,5938904
30
+ bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so,sha256=q_1Zn2FlCd6LaXYwjkDrE_rq0lFuNwDjGBJlWM_Nufg,11110784
31
+ bitsandbytes/libbitsandbytes_cuda111.so,sha256=JBLZ6wBWB5x1DasFqxcog59xxks5XHzLAdQFGZjCiDY,8974040
32
+ bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so,sha256=1qsndcAVNcCz-LcXytWYx81hPJgifIgNDw1MSx81ays,20244864
33
+ bitsandbytes/libbitsandbytes_cuda114.so,sha256=kh0dVhz5EoSIcpFoRt9vB9rtMSYayFrT1uQmDAP_nCI,9313912
34
+ bitsandbytes/libbitsandbytes_cuda114_nocublaslt.so,sha256=7BfmpKsEYpxamIB7a9WhjhXN7FC1o0FpyqO8IXu1Ep4,20973856
35
+ bitsandbytes/libbitsandbytes_cuda115.so,sha256=ncH3CjlEB0fyXvvj9my_SkUyfGwj_FVo4D-adRX63Gs,9310152
36
+ bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so,sha256=1vB8bV-E6pXTKZzOmfxFWiz3l7LrtQuSAh9n33oY1hM,20925040
37
+ bitsandbytes/libbitsandbytes_cuda117.so,sha256=bEkYZLxEKQZvsu3Agy-aDcIC2ZqQ8B6JDBHL2n1Osq0,9117944
38
+ bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so,sha256=jqc_QsosEBzjd7cNFNA-6QG5e1GGG1cLfEoh7d23zxA,20741032
39
+ bitsandbytes/libbitsandbytes_cuda118.so,sha256=B2MQaG_5NLc8iVHawOSu3V-ABcpbos6QdpSLTQ0IDXY,14918184
40
+ bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so,sha256=GaYqo8N7cNkxbAhI-dizyyBbuOqbEbNRR0nyh8LIWW4,26516696
41
+ bitsandbytes/libbitsandbytes_cuda120.so,sha256=1olVGrA_Frm3ZzYaUxDKRyeWXbJlTTWhlPjO1a0il_o,14504296
42
+ bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so,sha256=VUXyIHZb4V6-SOGPVPWVHyeKafG9xQPLEQIelTh69Oo,25709592
43
+ bitsandbytes/libbitsandbytes_cuda121.so,sha256=XRKDct-9s0poQp0sNFSgdvrGUMed2lRror6aVBU3hGM,14512488
44
+ bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so,sha256=YeYH36m5h2N7tULUoZ8Gt-CAfb8szLDPW5m9OLAQFAE,25721880
45
+ bitsandbytes/libbitsandbytes_cuda122.so,sha256=FrhXhmfraDbGt5I6OzUI1igJ5OkUKWdKDDq5fPYMU0k,14561032
46
+ bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so,sha256=WPSiBD_ozuUsk_aRdoJd5XVTcnpannmEmR6yok2mZTA,25803272
47
+ bitsandbytes/nn/__init__.py,sha256=i-gJR2uQrRvn8zZCZcS1KC0SbsUqCKTta4aV7HXZTT4,446
48
+ bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc,,
49
+ bitsandbytes/nn/__pycache__/modules.cpython-310.pyc,,
50
+ bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc,,
51
+ bitsandbytes/nn/modules.py,sha256=sIwAAAtMnk9s95HHTOC10rKERMvAl5gw03dCPL12oBY,20528
52
+ bitsandbytes/nn/triton_based_modules.py,sha256=eMEldLd7GX0Dc3dzX0XZpfgzofBPRAi-z1NXf84wCPs,9843
53
+ bitsandbytes/optim/__init__.py,sha256=TSl80yMFkwGBl8N0FBFcfBLt2vt4cZn-hbkuwHGuCUE,794
54
+ bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc,,
55
+ bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc,,
56
+ bitsandbytes/optim/__pycache__/adam.cpython-310.pyc,,
57
+ bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc,,
58
+ bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc,,
59
+ bitsandbytes/optim/__pycache__/lars.cpython-310.pyc,,
60
+ bitsandbytes/optim/__pycache__/lion.cpython-310.pyc,,
61
+ bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc,,
62
+ bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc,,
63
+ bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc,,
64
+ bitsandbytes/optim/adagrad.py,sha256=E4KsNJKOB2VfgkyKEoeYwFFXnedsxHZItdfzwc5_cdE,3719
65
+ bitsandbytes/optim/adam.py,sha256=nHHvXoeiAuosn4a9VWI3Z7_XmvYC6bOHb8en6mxiwkA,12776
66
+ bitsandbytes/optim/adamw.py,sha256=byibv4xoBM7FUK8FScRTx2KbI4-2Mi0yB8WJCb2x3wE,2699
67
+ bitsandbytes/optim/lamb.py,sha256=hfH4H9eVAHcbjL04DAI_lcPD1OPAmcY4_myow-o21aw,2313
68
+ bitsandbytes/optim/lars.py,sha256=PeUB8RlfaRtHEa-ZZZkrKDdmkHa7XEEfU81irU-mKsY,5653
69
+ bitsandbytes/optim/lion.py,sha256=jANwqVZSAxNZnoqi_OQ9XG8hKa6e84mkwJ9CchtpLHs,2304
70
+ bitsandbytes/optim/optimizer.py,sha256=219zPzx9dpeY0VndzlXt6jn2yV9sEiSXkrxe26wXjIo,25167
71
+ bitsandbytes/optim/rmsprop.py,sha256=1zGT9JIZh214fbBZ-CTirVKk1rQxSZe-BRJzhRtYL2U,2785
72
+ bitsandbytes/optim/sgd.py,sha256=YHVUeEkwxgYx_0GhH0Et6fCpk7rfhboDR2F06jRWz4E,2340
73
+ bitsandbytes/research/__init__.py,sha256=_MilJdwSRWObRfzzy14WD6HsJa6okT4d5YxH4aB9zg4,119
74
+ bitsandbytes/research/__pycache__/__init__.cpython-310.pyc,,
75
+ bitsandbytes/research/autograd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
76
+ bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc,,
77
+ bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc,,
78
+ bitsandbytes/research/autograd/_functions.py,sha256=k72rcf4hT3M5GOpGoijWkpTAqjRNoecGlOHmTTn3n80,15874
79
+ bitsandbytes/research/nn/__init__.py,sha256=j5XA_2ZA6efMtcbuUCyegfCLkDDQuL3ix5xS4yKZayY,53
80
+ bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc,,
81
+ bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc,,
82
+ bitsandbytes/research/nn/modules.py,sha256=EnI2qVTosAMkH4G1fQleA0zvm8dZR9G-GJ4pFDo8V9M,2357
83
+ bitsandbytes/triton/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
84
+ bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc,,
85
+ bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc,,
86
+ bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc,,
87
+ bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc,,
88
+ bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc,,
89
+ bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc,,
90
+ bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc,,
91
+ bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc,,
92
+ bitsandbytes/triton/dequantize_rowwise.py,sha256=qdh3f4O53faM6SFT_aYvrytWF_FQW3q2bhBll6Uwfc4,2193
93
+ bitsandbytes/triton/int8_matmul_mixed_dequanitze.py,sha256=QJ_hrZ94ZthnoPD0TCp5ZCPAMkxNNQQY-UNg50TWwHo,8256
94
+ bitsandbytes/triton/int8_matmul_rowwise_dequantize.py,sha256=EMiY3nfx0LIvYEGUqtzcfUonQxwoDcppYli9Qd6kViw,8240
95
+ bitsandbytes/triton/quantize_columnwise_and_transpose.py,sha256=K2fFegPtSsi2tgKxb5goO8YpUmQ6wgTvsXabgTRAFNI,2749
96
+ bitsandbytes/triton/quantize_global.py,sha256=5in9Plx1Kgf6Nx5B1RBXCiJnb0G4qwraGADNiq1LtVc,3957
97
+ bitsandbytes/triton/quantize_rowwise.py,sha256=sraX6TMubZQGiG9Gyh0UFzK823e_TkXZk9R1BILJdPU,2331
98
+ bitsandbytes/triton/triton_utils.py,sha256=f7CP_3lvUoTQJ-xSp4wAfiU8uX_trtGdUsoLzlcsHQY,103
99
+ bitsandbytes/utils.py,sha256=XASxdyR11sKKtY9DIwthe-zLU6v0vXwZzQvIVasjH7o,7499
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.41.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ bitsandbytes
evalkit_internvl/lib/python3.10/site-packages/httpcore/__init__.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._api import request, stream
2
+ from ._async import (
3
+ AsyncConnectionInterface,
4
+ AsyncConnectionPool,
5
+ AsyncHTTP2Connection,
6
+ AsyncHTTP11Connection,
7
+ AsyncHTTPConnection,
8
+ AsyncHTTPProxy,
9
+ AsyncSOCKSProxy,
10
+ )
11
+ from ._backends.base import (
12
+ SOCKET_OPTION,
13
+ AsyncNetworkBackend,
14
+ AsyncNetworkStream,
15
+ NetworkBackend,
16
+ NetworkStream,
17
+ )
18
+ from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream
19
+ from ._backends.sync import SyncBackend
20
+ from ._exceptions import (
21
+ ConnectError,
22
+ ConnectionNotAvailable,
23
+ ConnectTimeout,
24
+ LocalProtocolError,
25
+ NetworkError,
26
+ PoolTimeout,
27
+ ProtocolError,
28
+ ProxyError,
29
+ ReadError,
30
+ ReadTimeout,
31
+ RemoteProtocolError,
32
+ TimeoutException,
33
+ UnsupportedProtocol,
34
+ WriteError,
35
+ WriteTimeout,
36
+ )
37
+ from ._models import URL, Origin, Request, Response
38
+ from ._ssl import default_ssl_context
39
+ from ._sync import (
40
+ ConnectionInterface,
41
+ ConnectionPool,
42
+ HTTP2Connection,
43
+ HTTP11Connection,
44
+ HTTPConnection,
45
+ HTTPProxy,
46
+ SOCKSProxy,
47
+ )
48
+
49
+ # The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed.
50
+ try:
51
+ from ._backends.anyio import AnyIOBackend
52
+ except ImportError: # pragma: nocover
53
+
54
+ class AnyIOBackend: # type: ignore
55
+ def __init__(self, *args, **kwargs): # type: ignore
56
+ msg = (
57
+ "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed."
58
+ )
59
+ raise RuntimeError(msg)
60
+
61
+
62
+ # The 'httpcore.TrioBackend' class is conditional on 'trio' being installed.
63
+ try:
64
+ from ._backends.trio import TrioBackend
65
+ except ImportError: # pragma: nocover
66
+
67
+ class TrioBackend: # type: ignore
68
+ def __init__(self, *args, **kwargs): # type: ignore
69
+ msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed."
70
+ raise RuntimeError(msg)
71
+
72
+
73
+ __all__ = [
74
+ # top-level requests
75
+ "request",
76
+ "stream",
77
+ # models
78
+ "Origin",
79
+ "URL",
80
+ "Request",
81
+ "Response",
82
+ # async
83
+ "AsyncHTTPConnection",
84
+ "AsyncConnectionPool",
85
+ "AsyncHTTPProxy",
86
+ "AsyncHTTP11Connection",
87
+ "AsyncHTTP2Connection",
88
+ "AsyncConnectionInterface",
89
+ "AsyncSOCKSProxy",
90
+ # sync
91
+ "HTTPConnection",
92
+ "ConnectionPool",
93
+ "HTTPProxy",
94
+ "HTTP11Connection",
95
+ "HTTP2Connection",
96
+ "ConnectionInterface",
97
+ "SOCKSProxy",
98
+ # network backends, implementations
99
+ "SyncBackend",
100
+ "AnyIOBackend",
101
+ "TrioBackend",
102
+ # network backends, mock implementations
103
+ "AsyncMockBackend",
104
+ "AsyncMockStream",
105
+ "MockBackend",
106
+ "MockStream",
107
+ # network backends, interface
108
+ "AsyncNetworkStream",
109
+ "AsyncNetworkBackend",
110
+ "NetworkStream",
111
+ "NetworkBackend",
112
+ # util
113
+ "default_ssl_context",
114
+ "SOCKET_OPTION",
115
+ # exceptions
116
+ "ConnectionNotAvailable",
117
+ "ProxyError",
118
+ "ProtocolError",
119
+ "LocalProtocolError",
120
+ "RemoteProtocolError",
121
+ "UnsupportedProtocol",
122
+ "TimeoutException",
123
+ "PoolTimeout",
124
+ "ConnectTimeout",
125
+ "ReadTimeout",
126
+ "WriteTimeout",
127
+ "NetworkError",
128
+ "ConnectError",
129
+ "ReadError",
130
+ "WriteError",
131
+ ]
132
+
133
+ __version__ = "0.17.3"
134
+
135
+
136
+ __locals = locals()
137
+ for __name in __all__:
138
+ if not __name.startswith("__"):
139
+ setattr(__locals[__name], "__module__", "httpcore") # noqa
evalkit_internvl/lib/python3.10/site-packages/httpcore/_api.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+ from typing import Iterator, Optional, Union
3
+
4
+ from ._models import URL, Extensions, HeaderTypes, Response
5
+ from ._sync.connection_pool import ConnectionPool
6
+
7
+
8
+ def request(
9
+ method: Union[bytes, str],
10
+ url: Union[URL, bytes, str],
11
+ *,
12
+ headers: HeaderTypes = None,
13
+ content: Union[bytes, Iterator[bytes], None] = None,
14
+ extensions: Optional[Extensions] = None,
15
+ ) -> Response:
16
+ """
17
+ Sends an HTTP request, returning the response.
18
+
19
+ ```
20
+ response = httpcore.request("GET", "https://www.example.com/")
21
+ ```
22
+
23
+ Arguments:
24
+ method: The HTTP method for the request. Typically one of `"GET"`,
25
+ `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
26
+ url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
27
+ or as str/bytes.
28
+ headers: The HTTP request headers. Either as a dictionary of str/bytes,
29
+ or as a list of two-tuples of str/bytes.
30
+ content: The content of the request body. Either as bytes,
31
+ or as a bytes iterator.
32
+ extensions: A dictionary of optional extra information included on the request.
33
+ Possible keys include `"timeout"`.
34
+
35
+ Returns:
36
+ An instance of `httpcore.Response`.
37
+ """
38
+ with ConnectionPool() as pool:
39
+ return pool.request(
40
+ method=method,
41
+ url=url,
42
+ headers=headers,
43
+ content=content,
44
+ extensions=extensions,
45
+ )
46
+
47
+
48
+ @contextmanager
49
+ def stream(
50
+ method: Union[bytes, str],
51
+ url: Union[URL, bytes, str],
52
+ *,
53
+ headers: HeaderTypes = None,
54
+ content: Union[bytes, Iterator[bytes], None] = None,
55
+ extensions: Optional[Extensions] = None,
56
+ ) -> Iterator[Response]:
57
+ """
58
+ Sends an HTTP request, returning the response within a content manager.
59
+
60
+ ```
61
+ with httpcore.stream("GET", "https://www.example.com/") as response:
62
+ ...
63
+ ```
64
+
65
+ When using the `stream()` function, the body of the response will not be
66
+ automatically read. If you want to access the response body you should
67
+ either use `content = response.read()`, or `for chunk in response.iter_content()`.
68
+
69
+ Arguments:
70
+ method: The HTTP method for the request. Typically one of `"GET"`,
71
+ `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
72
+ url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
73
+ or as str/bytes.
74
+ headers: The HTTP request headers. Either as a dictionary of str/bytes,
75
+ or as a list of two-tuples of str/bytes.
76
+ content: The content of the request body. Either as bytes,
77
+ or as a bytes iterator.
78
+ extensions: A dictionary of optional extra information included on the request.
79
+ Possible keys include `"timeout"`.
80
+
81
+ Returns:
82
+ An instance of `httpcore.Response`.
83
+ """
84
+ with ConnectionPool() as pool:
85
+ with pool.stream(
86
+ method=method,
87
+ url=url,
88
+ headers=headers,
89
+ content=content,
90
+ extensions=extensions,
91
+ ) as response:
92
+ yield response
evalkit_internvl/lib/python3.10/site-packages/httpcore/_exceptions.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ from typing import Iterator, Mapping, Type
3
+
4
ExceptionMapping = Mapping[Type[Exception], Type[Exception]]


@contextlib.contextmanager
def map_exceptions(map: ExceptionMapping) -> Iterator[None]:
    """
    Translate exceptions raised within the managed block.

    Any exception that is an instance of a key in *map* is re-raised as the
    corresponding value type, chained to the original exception. Anything
    that matches no key propagates unchanged.
    """
    try:
        yield
    except Exception as exc:  # noqa: PIE786
        replacement = next(
            (target for source, target in map.items() if isinstance(exc, source)),
            None,
        )
        if replacement is not None:
            raise replacement(exc) from exc
        raise  # pragma: nocover
16
+
17
+
18
class ConnectionNotAvailable(Exception):
    """The selected connection cannot service a request right now.

    Used internally to signal that the request should be retried on another
    connection (e.g. a connection expected to be HTTP/2 ended up HTTP/1.1).
    """

    pass


class ProxyError(Exception):
    """The proxy refused or failed to establish the tunnelled connection."""

    pass


class UnsupportedProtocol(Exception):
    """The request URL uses a scheme this transport cannot handle."""

    pass


class ProtocolError(Exception):
    """Base class for violations of the HTTP protocol."""

    pass


class RemoteProtocolError(ProtocolError):
    """The remote peer sent data that violates the HTTP protocol."""

    pass


class LocalProtocolError(ProtocolError):
    """The local side attempted an action that violates the HTTP protocol."""

    pass


# Timeout errors


class TimeoutException(Exception):
    """Base class for all timeout errors."""

    pass


class PoolTimeout(TimeoutException):
    """Timed out waiting to acquire a connection from the pool."""

    pass


class ConnectTimeout(TimeoutException):
    """Timed out while establishing a connection."""

    pass


class ReadTimeout(TimeoutException):
    """Timed out while reading from the network."""

    pass


class WriteTimeout(TimeoutException):
    """Timed out while writing to the network."""

    pass


# Network errors


class NetworkError(Exception):
    """Base class for all socket-level I/O errors."""

    pass


class ConnectError(NetworkError):
    """Failed to establish a connection."""

    pass


class ReadError(NetworkError):
    """Failed while reading from the network."""

    pass


class WriteError(NetworkError):
    """Failed while writing to the network."""

    pass
evalkit_internvl/lib/python3.10/site-packages/httpcore/_models.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import (
2
+ Any,
3
+ AsyncIterable,
4
+ AsyncIterator,
5
+ Iterable,
6
+ Iterator,
7
+ List,
8
+ Mapping,
9
+ Optional,
10
+ Sequence,
11
+ Tuple,
12
+ Union,
13
+ )
14
+ from urllib.parse import urlparse
15
+
16
+ # Functions for typechecking...
17
+
18
+
19
+ HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]
20
+ HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]]
21
+ HeaderTypes = Union[HeadersAsSequence, HeadersAsMapping, None]
22
+
23
+ Extensions = Mapping[str, Any]
24
+
25
+
26
def enforce_bytes(value: Union[bytes, str], *, name: str) -> bytes:
    """
    Coerce a bytes-or-str argument to bytes.

    String arguments must consist solely of plain ASCII characters,
    chr(0)...chr(127). Anything outside that range has to be supplied as a
    bytes instance so the intended encoding is explicit.
    """
    if isinstance(value, bytes):
        return value
    if isinstance(value, str):
        try:
            return value.encode("ascii")
        except UnicodeEncodeError:
            raise TypeError(f"{name} strings may not include unicode characters.")

    seen_type = type(value).__name__
    raise TypeError(f"{name} must be bytes or str, but got {seen_type}.")
45
+
46
+
47
def enforce_url(value: Union["URL", bytes, str], *, name: str) -> "URL":
    """
    Coerce a URL argument to a `URL` instance.

    Accepts an existing `URL` (returned as-is) or a str/bytes value, which is
    parsed into a new `URL`.
    """
    if isinstance(value, URL):
        return value
    if isinstance(value, (bytes, str)):
        return URL(value)

    seen_type = type(value).__name__
    raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.")
58
+
59
+
60
def enforce_headers(
    value: Union[HeadersAsMapping, HeadersAsSequence, None] = None, *, name: str
) -> List[Tuple[bytes, bytes]]:
    """
    Normalise request or response headers to a list of bytes two-tuples.

    Accepts a mapping, a sequence of two-tuples, or `None`. Individual names
    and values may be str (plain ASCII only) or bytes.
    """
    if value is None:
        return []
    if isinstance(value, (Mapping, Sequence)):
        # A mapping exposes its pairs via .items(); a sequence *is* the pairs.
        pairs = value.items() if isinstance(value, Mapping) else value
        return [
            (
                enforce_bytes(k, name="header name"),
                enforce_bytes(v, name="header value"),
            )
            for k, v in pairs
        ]

    seen_type = type(value).__name__
    raise TypeError(
        f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}."
    )
90
+
91
+
92
def enforce_stream(
    value: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None], *, name: str
) -> Union[Iterable[bytes], AsyncIterable[bytes]]:
    """
    Normalise body content to a (sync or async) iterable of bytes.

    `None` becomes an empty stream, raw bytes become a single-chunk stream,
    and any existing iterable is passed through untouched.
    """
    if value is None:
        return ByteStream(b"")
    if isinstance(value, bytes):
        return ByteStream(value)
    return value
100
+
101
+
102
# Default port numbers per URL scheme. See:
# * https://tools.ietf.org/html/rfc3986#section-3.2.3
# * https://url.spec.whatwg.org/#url-miscellaneous
# * https://url.spec.whatwg.org/#scheme-state
DEFAULT_PORTS = {
    b"ftp": 21,
    b"http": 80,
    b"https": 443,
    b"ws": 80,
    b"wss": 443,
}


def include_request_headers(
    headers: List[Tuple[bytes, bytes]],
    *,
    url: "URL",
    content: Union[None, bytes, Iterable[bytes], AsyncIterable[bytes]],
) -> List[Tuple[bytes, bytes]]:
    """
    Add any framing headers the request requires but does not already carry.

    A `Host` header is prepended when absent. When a body is present but
    neither `Content-Length` nor `Transfer-Encoding` is set, `Content-Length`
    is appended for in-memory bytes and `Transfer-Encoding: chunked` for
    streamed bodies.
    """
    present = {name.lower() for name, _ in headers}

    if b"host" not in present:
        # Omit the port from the Host header when it is the scheme default.
        if url.port is None or url.port == DEFAULT_PORTS.get(url.scheme):
            host_header = url.host
        else:
            host_header = b"%b:%d" % (url.host, url.port)
        headers = [(b"Host", host_header)] + headers

    body_is_unframed = (
        content is not None
        and b"content-length" not in present
        and b"transfer-encoding" not in present
    )
    if body_is_unframed:
        if isinstance(content, bytes):
            size = str(len(content)).encode("ascii")
            headers = headers + [(b"Content-Length", size)]
        else:
            headers = headers + [(b"Transfer-Encoding", b"chunked")]  # pragma: nocover

    return headers
142
+
143
+
144
+ # Interfaces for byte streams...
145
+
146
+
147
class ByteStream:
    """
    An in-memory body container supporting both sync and async iteration.

    The whole content is always yielded as a single chunk.
    """

    def __init__(self, content: bytes) -> None:
        self._content = content

    def __iter__(self) -> Iterator[bytes]:
        yield self._content

    async def __aiter__(self) -> AsyncIterator[bytes]:
        yield self._content

    def __repr__(self) -> str:
        class_name = self.__class__.__name__
        return f"<{class_name} [{len(self._content)} bytes]>"
164
+
165
+
166
class Origin:
    """
    A (scheme, host, port) triple identifying a connection target.

    Two requests share a connection only when their origins compare equal.
    """

    def __init__(self, scheme: bytes, host: bytes, port: int) -> None:
        self.scheme = scheme
        self.host = host
        self.port = port

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Origin):
            return False
        return (self.scheme, self.host, self.port) == (
            other.scheme,
            other.host,
            other.port,
        )

    def __str__(self) -> str:
        scheme = self.scheme.decode("ascii")
        host = self.host.decode("ascii")
        return f"{scheme}://{host}:{self.port}"
185
+
186
+
187
class URL:
    """
    Represents the URL against which an HTTP request may be made.

    The URL may either be specified as a plain string, for convenience:

    ```python
    url = httpcore.URL("https://www.example.com/")
    ```

    Or be constructed with explicitly pre-parsed components:

    ```python
    url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')
    ```

    Using this second more explicit style allows integrations that are using
    `httpcore` to pass through URLs that have already been parsed in order to use
    libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures
    that URL parsing is treated identically at both the networking level and at any
    higher layers of abstraction.

    The four components are important here, as they allow the URL to be precisely
    specified in a pre-parsed format. They also allow certain types of request to
    be created that could not otherwise be expressed.

    For example, an HTTP request to `http://www.example.com/` forwarded via a proxy
    at `http://localhost:8080`...

    ```python
    # Constructs an HTTP request with a complete URL as the target:
    # GET https://www.example.com/ HTTP/1.1
    url = httpcore.URL(
        scheme=b'http',
        host=b'localhost',
        port=8080,
        target=b'https://www.example.com/'
    )
    request = httpcore.Request(
        method="GET",
        url=url
    )
    ```

    Another example is constructing an `OPTIONS *` request...

    ```python
    # Constructs an 'OPTIONS *' HTTP request:
    # OPTIONS * HTTP/1.1
    url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')
    request = httpcore.Request(method="OPTIONS", url=url)
    ```

    This kind of request is not possible to formulate with a URL string,
    because the `/` delimiter is always used to demark the target from the
    host/port portion of the URL.

    For convenience, string-like arguments may be specified either as strings or
    as bytes. However, once a request is being issued over-the-wire, the URL
    components are always ultimately required to be a bytewise representation.

    In order to avoid any ambiguity over character encodings, when strings are used
    as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`.
    If you require a bytewise representation that is outside this range you must
    handle the character encoding directly, and pass a bytes instance.
    """

    def __init__(
        self,
        url: Union[bytes, str] = "",
        *,
        scheme: Union[bytes, str] = b"",
        host: Union[bytes, str] = b"",
        port: Optional[int] = None,
        target: Union[bytes, str] = b"",
    ) -> None:
        """
        Parameters:
            url: The complete URL as a string or bytes.
            scheme: The URL scheme as a string or bytes.
                Typically either `"http"` or `"https"`.
            host: The URL host as a string or bytes. Such as `"www.example.com"`.
            port: The port to connect to. Either an integer or `None`.
            target: The target of the HTTP request. Such as `"/items?search=red"`.
        """
        if url:
            # A complete URL string/bytes takes precedence over the
            # individual component arguments.
            parsed = urlparse(enforce_bytes(url, name="url"))
            self.scheme = parsed.scheme
            self.host = parsed.hostname or b""
            self.port = parsed.port
            # The request target is the path plus any query string.
            self.target = (parsed.path or b"/") + (
                b"?" + parsed.query if parsed.query else b""
            )
        else:
            self.scheme = enforce_bytes(scheme, name="scheme")
            self.host = enforce_bytes(host, name="host")
            self.port = port
            self.target = enforce_bytes(target, name="target")

    @property
    def origin(self) -> Origin:
        # NOTE: schemes outside this mapping raise a KeyError here.
        default_port = {
            b"http": 80,
            b"https": 443,
            b"ws": 80,
            b"wss": 443,
            b"socks5": 1080,
        }[self.scheme]
        return Origin(
            scheme=self.scheme, host=self.host, port=self.port or default_port
        )

    def __eq__(self, other: Any) -> bool:
        return (
            isinstance(other, URL)
            and other.scheme == self.scheme
            and other.host == self.host
            and other.port == self.port
            and other.target == self.target
        )

    def __bytes__(self) -> bytes:
        if self.port is None:
            return b"%b://%b%b" % (self.scheme, self.host, self.target)
        return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target)

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(scheme={self.scheme!r}, "
            f"host={self.host!r}, port={self.port!r}, target={self.target!r})"
        )
318
+
319
+
320
class Request:
    """
    An HTTP request.
    """

    def __init__(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> None:
        """
        Parameters:
            method: The HTTP request method, either as a string or bytes.
                For example: `GET`.
            url: The request URL, either as a `URL` instance, or as a string or bytes.
                For example: `"https://www.example.com".`
            headers: The HTTP request headers.
            content: The content of the request body.
            extensions: A dictionary of optional extra information included on
                the request. Possible keys include `"timeout"`, and `"trace"`.
        """
        # All user-supplied components are normalised up-front so the rest
        # of the codebase can rely on a strictly bytewise representation.
        self.method: bytes = enforce_bytes(method, name="method")
        self.url: URL = enforce_url(url, name="url")
        normalized_headers = enforce_headers(headers, name="headers")
        self.headers: List[Tuple[bytes, bytes]] = normalized_headers
        body = enforce_stream(content, name="content")
        self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = body
        self.extensions = {} if extensions is None else extensions

    def __repr__(self) -> str:
        class_name = self.__class__.__name__
        return f"<{class_name} [{self.method!r}]>"
357
+
358
+
359
class Response:
    """
    An HTTP response.
    """

    def __init__(
        self,
        status: int,
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, Iterable[bytes], AsyncIterable[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> None:
        """
        Parameters:
            status: The HTTP status code of the response. For example `200`.
            headers: The HTTP response headers.
            content: The content of the response body.
            extensions: A dictionary of optional extra information included on
                the response. Possible keys include `"http_version"`,
                `"reason_phrase"`, and `"network_stream"`.
        """
        self.status: int = status
        self.headers: List[Tuple[bytes, bytes]] = enforce_headers(
            headers, name="headers"
        )
        self.stream: Union[Iterable[bytes], AsyncIterable[bytes]] = enforce_stream(
            content, name="content"
        )
        self.extensions = {} if extensions is None else extensions

        # Guards against iterating the body more than once.
        self._stream_consumed = False

    @property
    def content(self) -> bytes:
        # `_content` is only set once the stream has been fully read via
        # `read()` / `aread()`; accessing it earlier is an error. The branch
        # picks the error message that matches the stream's sync/async style.
        if not hasattr(self, "_content"):
            if isinstance(self.stream, Iterable):
                raise RuntimeError(
                    "Attempted to access 'response.content' on a streaming response. "
                    "Call 'response.read()' first."
                )
            else:
                raise RuntimeError(
                    "Attempted to access 'response.content' on a streaming response. "
                    "Call 'await response.aread()' first."
                )
        return self._content

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.status}]>"

    # Sync interface...

    def read(self) -> bytes:
        """Read the entire response body into memory and return it."""
        if not isinstance(self.stream, Iterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to read an asynchronous response using 'response.read()'. "
                "You should use 'await response.aread()' instead."
            )
        if not hasattr(self, "_content"):
            self._content = b"".join([part for part in self.iter_stream()])
        return self._content

    def iter_stream(self) -> Iterator[bytes]:
        """Iterate over the response body, one chunk at a time.

        May only be called once per response.
        """
        if not isinstance(self.stream, Iterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to stream an asynchronous response using 'for ... in "
                "response.iter_stream()'. "
                "You should use 'async for ... in response.aiter_stream()' instead."
            )
        if self._stream_consumed:
            raise RuntimeError(
                "Attempted to call 'for ... in response.iter_stream()' more than once."
            )
        self._stream_consumed = True
        for chunk in self.stream:
            yield chunk

    def close(self) -> None:
        """Close the underlying stream, if it supports closing."""
        if not isinstance(self.stream, Iterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to close an asynchronous response using 'response.close()'. "
                "You should use 'await response.aclose()' instead."
            )
        if hasattr(self.stream, "close"):
            self.stream.close()

    # Async interface...

    async def aread(self) -> bytes:
        """Async equivalent of `read()`."""
        if not isinstance(self.stream, AsyncIterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to read an synchronous response using "
                "'await response.aread()'. "
                "You should use 'response.read()' instead."
            )
        if not hasattr(self, "_content"):
            self._content = b"".join([part async for part in self.aiter_stream()])
        return self._content

    async def aiter_stream(self) -> AsyncIterator[bytes]:
        """Async equivalent of `iter_stream()`. May only be called once."""
        if not isinstance(self.stream, AsyncIterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to stream an synchronous response using 'async for ... in "
                "response.aiter_stream()'. "
                "You should use 'for ... in response.iter_stream()' instead."
            )
        if self._stream_consumed:
            raise RuntimeError(
                "Attempted to call 'async for ... in response.aiter_stream()' "
                "more than once."
            )
        self._stream_consumed = True
        async for chunk in self.stream:
            yield chunk

    async def aclose(self) -> None:
        """Async equivalent of `close()`."""
        if not isinstance(self.stream, AsyncIterable):  # pragma: nocover
            raise RuntimeError(
                "Attempted to close a synchronous response using "
                "'await response.aclose()'. "
                "You should use 'response.close()' instead."
            )
        if hasattr(self.stream, "aclose"):
            await self.stream.aclose()
evalkit_internvl/lib/python3.10/site-packages/httpcore/_ssl.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import ssl
2
+
3
+ import certifi
4
+
5
+
6
def default_ssl_context() -> ssl.SSLContext:
    """
    Return an `ssl.SSLContext` with secure defaults, using the `certifi`
    CA bundle for certificate verification.
    """
    context = ssl.create_default_context()
    context.load_verify_locations(certifi.where())
    return context
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__init__.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .connection import HTTPConnection
from .connection_pool import ConnectionPool
from .http11 import HTTP11Connection
from .http_proxy import HTTPProxy
from .interfaces import ConnectionInterface

# HTTP/2 support is optional. If the `h2` dependency is missing, expose a
# stub class that raises a helpful error at instantiation time.
try:
    from .http2 import HTTP2Connection
except ImportError:  # pragma: nocover

    class HTTP2Connection:  # type: ignore
        """Stub used when the optional `h2` package is not installed."""

        def __init__(self, *args, **kwargs) -> None:  # type: ignore
            raise RuntimeError(
                "Attempted to use http2 support, but the `h2` package is not "
                "installed. Use 'pip install httpcore[http2]'."
            )


# SOCKS proxy support is optional. Same stub pattern for `socksio`.
try:
    from .socks_proxy import SOCKSProxy
except ImportError:  # pragma: nocover

    class SOCKSProxy:  # type: ignore
        """Stub used when the optional `socksio` package is not installed."""

        def __init__(self, *args, **kwargs) -> None:  # type: ignore
            raise RuntimeError(
                "Attempted to use SOCKS support, but the `socksio` package is not "
                "installed. Use 'pip install httpcore[socks]'."
            )


# Public names re-exported by the synchronous API.
__all__ = [
    "HTTPConnection",
    "ConnectionPool",
    "HTTPProxy",
    "HTTP11Connection",
    "HTTP2Connection",
    "ConnectionInterface",
    "SOCKSProxy",
]
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ssl
2
+ import sys
3
+ from types import TracebackType
4
+ from typing import Iterable, Iterator, Iterable, List, Optional, Type
5
+
6
+ from .._backends.sync import SyncBackend
7
+ from .._backends.base import SOCKET_OPTION, NetworkBackend
8
+ from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol
9
+ from .._models import Origin, Request, Response
10
+ from .._synchronization import Event, Lock, ShieldCancellation
11
+ from .connection import HTTPConnection
12
+ from .interfaces import ConnectionInterface, RequestInterface
13
+
14
+
15
class RequestStatus:
    """
    Tracks a queued request and the connection (if any) assigned to it.
    """

    def __init__(self, request: Request):
        self.request = request
        self.connection: Optional[ConnectionInterface] = None
        self._acquired_event = Event()

    def set_connection(self, connection: ConnectionInterface) -> None:
        """Assign a connection to this request and wake any waiter."""
        assert self.connection is None
        self.connection = connection
        self._acquired_event.set()

    def unset_connection(self) -> None:
        """Return to the unassigned state, with a fresh (unset) event."""
        assert self.connection is not None
        self.connection = None
        self._acquired_event = Event()

    def wait_for_connection(
        self, timeout: Optional[float] = None
    ) -> ConnectionInterface:
        """Block until a connection has been assigned, then return it."""
        if self.connection is None:
            self._acquired_event.wait(timeout=timeout)
        assert self.connection is not None
        return self.connection
38
+
39
+
40
class ConnectionPool(RequestInterface):
    """
    A connection pool for making HTTP requests.
    """

    def __init__(
        self,
        ssl_context: Optional[ssl.SSLContext] = None,
        max_connections: Optional[int] = 10,
        max_keepalive_connections: Optional[int] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        local_address: Optional[str] = None,
        uds: Optional[str] = None,
        network_backend: Optional[NetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests.

        Parameters:
            ssl_context: An SSL context to use for verifying connections.
                If not specified, the default `httpcore.default_ssl_context()`
                will be used.
            max_connections: The maximum number of concurrent HTTP connections that
                the pool should allow. Any attempt to send a request on a pool that
                would exceed this amount will block until a connection is available.
            max_keepalive_connections: The maximum number of idle HTTP connections
                that will be maintained in the pool.
            keepalive_expiry: The duration in seconds that an idle HTTP connection
                may be maintained for before being expired from the pool.
            http1: A boolean indicating if HTTP/1.1 requests should be supported
                by the connection pool. Defaults to True.
            http2: A boolean indicating if HTTP/2 requests should be supported by
                the connection pool. Defaults to False.
            retries: The maximum number of retries when trying to establish a
                connection.
            local_address: Local address to connect from. Can also be used to connect
                using a particular address family. Using `local_address="0.0.0.0"`
                will connect using an `AF_INET` address (IPv4), while using
                `local_address="::"` will connect using an `AF_INET6` address (IPv6).
            uds: Path to a Unix Domain Socket to use instead of TCP sockets.
            network_backend: A backend instance to use for handling network I/O.
            socket_options: Socket options that have to be included
                in the TCP socket when the connection was established.
        """
        self._ssl_context = ssl_context

        # `None` means "no limit"; normalise to sys.maxsize so comparisons
        # below work uniformly.
        self._max_connections = (
            sys.maxsize if max_connections is None else max_connections
        )
        self._max_keepalive_connections = (
            sys.maxsize
            if max_keepalive_connections is None
            else max_keepalive_connections
        )
        # The keep-alive limit can never exceed the overall connection limit.
        self._max_keepalive_connections = min(
            self._max_connections, self._max_keepalive_connections
        )

        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2
        self._retries = retries
        self._local_address = local_address
        self._uds = uds

        self._pool: List[ConnectionInterface] = []
        self._requests: List[RequestStatus] = []
        # Guards all mutation of `_pool` and `_requests`.
        self._pool_lock = Lock()
        self._network_backend = (
            SyncBackend() if network_backend is None else network_backend
        )
        self._socket_options = socket_options

    def create_connection(self, origin: Origin) -> ConnectionInterface:
        """Build a new connection for the given origin, using the pool's config."""
        return HTTPConnection(
            origin=origin,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            retries=self._retries,
            local_address=self._local_address,
            uds=self._uds,
            network_backend=self._network_backend,
            socket_options=self._socket_options,
        )

    @property
    def connections(self) -> List[ConnectionInterface]:
        """
        Return a list of the connections currently in the pool.

        For example:

        ```python
        >>> pool.connections
        [
            <HTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
            <HTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,
            <HTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
        ]
        ```
        """
        return list(self._pool)

    def _attempt_to_acquire_connection(self, status: RequestStatus) -> bool:
        """
        Attempt to provide a connection that can handle the given origin.
        """
        # Callers must hold `_pool_lock` when invoking this method.
        origin = status.request.url.origin

        # If there are queued requests in front of us, then don't acquire a
        # connection. We handle requests strictly in order.
        waiting = [s for s in self._requests if s.connection is None]
        if waiting and waiting[0] is not status:
            return False

        # Reuse an existing connection if one is currently available.
        for idx, connection in enumerate(self._pool):
            if connection.can_handle_request(origin) and connection.is_available():
                # Move to the front: most-recently-used ordering keeps idle
                # connections at the tail, where expiry scans start.
                self._pool.pop(idx)
                self._pool.insert(0, connection)
                status.set_connection(connection)
                return True

        # If the pool is currently full, attempt to close one idle connection.
        if len(self._pool) >= self._max_connections:
            for idx, connection in reversed(list(enumerate(self._pool))):
                if connection.is_idle():
                    connection.close()
                    self._pool.pop(idx)
                    break

        # If the pool is still full, then we cannot acquire a connection.
        if len(self._pool) >= self._max_connections:
            return False

        # Otherwise create a new connection.
        connection = self.create_connection(origin)
        self._pool.insert(0, connection)
        status.set_connection(connection)
        return True

    def _close_expired_connections(self) -> None:
        """
        Clean up the connection pool by closing off any connections that have expired.
        """
        # Callers must hold `_pool_lock` when invoking this method.
        # Close any connections that have expired their keep-alive time.
        for idx, connection in reversed(list(enumerate(self._pool))):
            if connection.has_expired():
                connection.close()
                self._pool.pop(idx)

        # If the pool size exceeds the maximum number of allowed keep-alive connections,
        # then close off idle connections as required.
        pool_size = len(self._pool)
        for idx, connection in reversed(list(enumerate(self._pool))):
            if connection.is_idle() and pool_size > self._max_keepalive_connections:
                connection.close()
                self._pool.pop(idx)
                pool_size -= 1

    def handle_request(self, request: Request) -> Response:
        """
        Send an HTTP request, and return an HTTP response.

        This is the core implementation that is called into by `.request()` or `.stream()`.
        """
        scheme = request.url.scheme.decode()
        if scheme == "":
            raise UnsupportedProtocol(
                "Request URL is missing an 'http://' or 'https://' protocol."
            )
        if scheme not in ("http", "https", "ws", "wss"):
            raise UnsupportedProtocol(
                f"Request URL has an unsupported protocol '{scheme}://'."
            )

        status = RequestStatus(request)

        with self._pool_lock:
            self._requests.append(status)
            self._close_expired_connections()
            self._attempt_to_acquire_connection(status)

        while True:
            # Re-read the timeout each iteration: a retry re-enters the wait.
            timeouts = request.extensions.get("timeout", {})
            timeout = timeouts.get("pool", None)
            try:
                connection = status.wait_for_connection(timeout=timeout)
            except BaseException as exc:
                # If we timeout here, or if the task is cancelled, then make
                # sure to remove the request from the queue before bubbling
                # up the exception.
                with self._pool_lock:
                    # Ensure only remove when task exists.
                    if status in self._requests:
                        self._requests.remove(status)
                raise exc

            try:
                response = connection.handle_request(request)
            except ConnectionNotAvailable:
                # The ConnectionNotAvailable exception is a special case, that
                # indicates we need to retry the request on a new connection.
                #
                # The most common case where this can occur is when multiple
                # requests are queued waiting for a single connection, which
                # might end up as an HTTP/2 connection, but which actually ends
                # up as HTTP/1.1.
                with self._pool_lock:
                    # Maintain our position in the request queue, but reset the
                    # status so that the request becomes queued again.
                    status.unset_connection()
                    self._attempt_to_acquire_connection(status)
            except BaseException as exc:
                with ShieldCancellation():
                    self.response_closed(status)
                raise exc
            else:
                break

        # When we return the response, we wrap the stream in a special class
        # that handles notifying the connection pool once the response
        # has been released.
        assert isinstance(response.stream, Iterable)
        return Response(
            status=response.status,
            headers=response.headers,
            content=ConnectionPoolByteStream(response.stream, self, status),
            extensions=response.extensions,
        )

    def response_closed(self, status: RequestStatus) -> None:
        """
        This method acts as a callback once the request/response cycle is complete.

        It is called into from the `ConnectionPoolByteStream.close()` method.
        """
        assert status.connection is not None
        connection = status.connection

        with self._pool_lock:
            # Update the state of the connection pool.
            if status in self._requests:
                self._requests.remove(status)

            if connection.is_closed() and connection in self._pool:
                self._pool.remove(connection)

            # Since we've had a response closed, it's possible we'll now be able
            # to service one or more requests that are currently pending.
            for status in self._requests:
                if status.connection is None:
                    acquired = self._attempt_to_acquire_connection(status)
                    # If we could not acquire a connection for a queued request
                    # then we don't need to check anymore requests that are
                    # queued later behind it.
                    if not acquired:
                        break

            # Housekeeping.
            self._close_expired_connections()

    def close(self) -> None:
        """
        Close any connections in the pool.
        """
        with self._pool_lock:
            for connection in self._pool:
                connection.close()
            self._pool = []
            self._requests = []

    def __enter__(self) -> "ConnectionPool":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        self.close()
329
+
330
class ConnectionPoolByteStream:
    """
    Wraps a response byte stream so that closing it additionally notifies the
    owning connection pool that the response has been released.
    """

    def __init__(
        self,
        stream: Iterable[bytes],
        pool: ConnectionPool,
        status: RequestStatus,
    ) -> None:
        self._stream = stream
        self._pool = pool
        self._status = status

    def __iter__(self) -> Iterator[bytes]:
        yield from self._stream

    def close(self) -> None:
        try:
            underlying_close = getattr(self._stream, "close", None)
            if underlying_close is not None:
                underlying_close()
        finally:
            # Shield the pool bookkeeping from cancellation, so pool state
            # stays consistent even if the caller is being torn down.
            with ShieldCancellation():
                self._pool.response_closed(self._status)
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/interfaces.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+ from typing import Iterator, Optional, Union
3
+
4
+ from .._models import (
5
+ URL,
6
+ Extensions,
7
+ HeaderTypes,
8
+ Origin,
9
+ Request,
10
+ Response,
11
+ enforce_bytes,
12
+ enforce_headers,
13
+ enforce_url,
14
+ include_request_headers,
15
+ )
16
+
17
+
18
class RequestInterface:
    def _build_request(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        headers: HeaderTypes,
        content: Union[bytes, Iterator[bytes], None],
        extensions: Optional[Extensions],
    ) -> Request:
        # Normalize the loosely-typed arguments into their strict forms,
        # then attach the Host header plus Content-Length or
        # Transfer-Encoding as implied by the content.
        method = enforce_bytes(method, name="method")
        url = enforce_url(url, name="url")
        headers = enforce_headers(headers, name="headers")
        headers = include_request_headers(headers, url=url, content=content)
        return Request(
            method=method,
            url=url,
            headers=headers,
            content=content,
            extensions=extensions,
        )

    def request(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, Iterator[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> Response:
        """Send a request and return the response with its body fully read."""
        request = self._build_request(method, url, headers, content, extensions)
        response = self.handle_request(request)
        try:
            response.read()
        finally:
            response.close()
        return response

    @contextmanager
    def stream(
        self,
        method: Union[bytes, str],
        url: Union[URL, bytes, str],
        *,
        headers: HeaderTypes = None,
        content: Union[bytes, Iterator[bytes], None] = None,
        extensions: Optional[Extensions] = None,
    ) -> Iterator[Response]:
        """Send a request, yielding the response for streaming consumption."""
        request = self._build_request(method, url, headers, content, extensions)
        response = self.handle_request(request)
        try:
            yield response
        finally:
            response.close()

    def handle_request(self, request: Request) -> Response:
        # Concrete transports implement the actual request/response cycle.
        raise NotImplementedError()  # pragma: nocover
83
+
84
+
85
class ConnectionInterface(RequestInterface):
    def close(self) -> None:
        raise NotImplementedError()  # pragma: nocover

    def info(self) -> str:
        raise NotImplementedError()  # pragma: nocover

    def can_handle_request(self, origin: Origin) -> bool:
        raise NotImplementedError()  # pragma: nocover

    def is_available(self) -> bool:
        """
        Indicates whether this connection can currently accept a new
        outgoing request.

        For HTTP/1.1 that is only the case while the connection is idle.

        For HTTP/2 it holds for as long as stream IDs remain available and
        the connection has not entered an error state.

        While a connection is still being established we may not yet know
        whether it will end up as HTTP/1.1 or HTTP/2. Such a connection is
        treated as available, but may ultimately raise
        `NewConnectionRequired` if multiple requests are attempted over
        what turns out to be an HTTP/1.1 connection.
        """
        raise NotImplementedError()  # pragma: nocover

    def has_expired(self) -> bool:
        """
        Indicates whether the connection should now be closed: either an
        idle keep-alive has passed its expiry time, or the server has sent
        an EOF.
        """
        raise NotImplementedError()  # pragma: nocover

    def is_idle(self) -> bool:
        """Indicates whether the connection is currently idle."""
        raise NotImplementedError()  # pragma: nocover

    def is_closed(self) -> bool:
        """
        Indicates whether the connection has been closed.

        Consulted when a response is closed, to decide whether the
        connection may be returned to the connection pool.
        """
        raise NotImplementedError()  # pragma: nocover
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/socks_proxy.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import ssl
3
+ import typing
4
+
5
+ from socksio import socks5
6
+
7
+ from .._backends.sync import SyncBackend
8
+ from .._backends.base import NetworkBackend, NetworkStream
9
+ from .._exceptions import ConnectionNotAvailable, ProxyError
10
+ from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url
11
+ from .._ssl import default_ssl_context
12
+ from .._synchronization import Lock
13
+ from .._trace import Trace
14
+ from .connection_pool import ConnectionPool
15
+ from .http11 import HTTP11Connection
16
+ from .interfaces import ConnectionInterface
17
+
18
logger = logging.getLogger("httpcore.socks")


# Human-readable names for the SOCKS5 authentication methods (RFC 1928),
# keyed by their single-byte wire encoding. Used to build error messages.
AUTH_METHODS = {
    b"\x00": "NO AUTHENTICATION REQUIRED",
    b"\x01": "GSSAPI",
    b"\x02": "USERNAME/PASSWORD",
    b"\xff": "NO ACCEPTABLE METHODS",
}

# Human-readable descriptions for the SOCKS5 reply codes (RFC 1928),
# keyed by their single-byte wire encoding. Used to build error messages.
REPLY_CODES = {
    b"\x00": "Succeeded",
    b"\x01": "General SOCKS server failure",
    b"\x02": "Connection not allowed by ruleset",
    b"\x03": "Network unreachable",
    b"\x04": "Host unreachable",
    b"\x05": "Connection refused",
    b"\x06": "TTL expired",
    b"\x07": "Command not supported",
    b"\x08": "Address type not supported",
}
39
+
40
+
41
def _init_socks5_connection(
    stream: NetworkStream,
    *,
    host: bytes,
    port: int,
    auth: typing.Optional[typing.Tuple[bytes, bytes]] = None,
) -> None:
    """
    Perform the SOCKS5 handshake (RFC 1928 / RFC 1929) over `stream`,
    leaving it tunnelled to `host`:`port` via the proxy.

    Raises `ProxyError` if the proxy rejects the requested authentication
    method, the supplied credentials, or the CONNECT command.
    """
    conn = socks5.SOCKS5Connection()

    # Auth method request
    auth_method = (
        socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED
        if auth is None
        else socks5.SOCKS5AuthMethod.USERNAME_PASSWORD
    )
    conn.send(socks5.SOCKS5AuthMethodsRequest([auth_method]))
    outgoing_bytes = conn.data_to_send()
    stream.write(outgoing_bytes)

    # Auth method response
    incoming_bytes = stream.read(max_bytes=4096)
    response = conn.receive_data(incoming_bytes)
    assert isinstance(response, socks5.SOCKS5AuthReply)
    if response.method != auth_method:
        requested = AUTH_METHODS.get(auth_method, "UNKNOWN")
        responded = AUTH_METHODS.get(response.method, "UNKNOWN")
        raise ProxyError(
            f"Requested {requested} from proxy server, but got {responded}."
        )

    if response.method == socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
        # Username/password request
        assert auth is not None
        username, password = auth
        conn.send(socks5.SOCKS5UsernamePasswordRequest(username, password))
        outgoing_bytes = conn.data_to_send()
        stream.write(outgoing_bytes)

        # Username/password response
        incoming_bytes = stream.read(max_bytes=4096)
        response = conn.receive_data(incoming_bytes)
        assert isinstance(response, socks5.SOCKS5UsernamePasswordReply)
        if not response.success:
            raise ProxyError("Invalid username/password")

    # Connect request
    conn.send(
        socks5.SOCKS5CommandRequest.from_address(
            socks5.SOCKS5Command.CONNECT, (host, port)
        )
    )
    outgoing_bytes = conn.data_to_send()
    stream.write(outgoing_bytes)

    # Connect response
    incoming_bytes = stream.read(max_bytes=4096)
    response = conn.receive_data(incoming_bytes)
    assert isinstance(response, socks5.SOCKS5Reply)
    if response.reply_code != socks5.SOCKS5ReplyCode.SUCCEEDED:
        # Fix: fallback string was misspelled "UNKOWN".
        reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
        raise ProxyError(f"Proxy Server could not connect: {reply_code}.")
102
+
103
+
104
class SOCKSProxy(ConnectionPool):
    """
    A connection pool that sends requests via a SOCKS5 proxy.
    """

    def __init__(
        self,
        proxy_url: typing.Union[URL, bytes, str],
        proxy_auth: typing.Optional[
            typing.Tuple[typing.Union[bytes, str], typing.Union[bytes, str]]
        ] = None,
        ssl_context: typing.Optional[ssl.SSLContext] = None,
        max_connections: typing.Optional[int] = 10,
        max_keepalive_connections: typing.Optional[int] = None,
        keepalive_expiry: typing.Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        network_backend: typing.Optional[NetworkBackend] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests via a SOCKS5 proxy.

        Parameters:
            proxy_url: The URL to use when connecting to the proxy server.
                For example `"socks5://127.0.0.1:1080/"`.
            proxy_auth: An optional `(username, password)` tuple used to
                authenticate with the proxy server.
            ssl_context: An SSL context to use for verifying connections.
                If not specified, the default `httpcore.default_ssl_context()`
                will be used.
            max_connections: The maximum number of concurrent HTTP connections that
                the pool should allow. Any attempt to send a request on a pool that
                would exceed this amount will block until a connection is available.
            max_keepalive_connections: The maximum number of idle HTTP connections
                that will be maintained in the pool.
            keepalive_expiry: The duration in seconds that an idle HTTP connection
                may be maintained for before being expired from the pool.
            http1: A boolean indicating if HTTP/1.1 requests should be supported
                by the connection pool. Defaults to True.
            http2: A boolean indicating if HTTP/2 requests should be supported by
                the connection pool. Defaults to False.
            retries: The maximum number of retries when trying to establish
                a connection.
            network_backend: A backend instance to use for handling network I/O.
        """
        super().__init__(
            ssl_context=ssl_context,
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
            keepalive_expiry=keepalive_expiry,
            http1=http1,
            http2=http2,
            network_backend=network_backend,
            retries=retries,
        )
        self._ssl_context = ssl_context
        self._proxy_url = enforce_url(proxy_url, name="proxy_url")
        if proxy_auth is not None:
            # Store credentials as bytes so the SOCKS5 handshake can use
            # them directly.
            username, password = proxy_auth
            username_bytes = enforce_bytes(username, name="proxy_auth")
            password_bytes = enforce_bytes(password, name="proxy_auth")
            self._proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = (
                username_bytes,
                password_bytes,
            )
        else:
            self._proxy_auth = None

    def create_connection(self, origin: Origin) -> ConnectionInterface:
        # Each remote origin gets its own connection tunnelled through
        # the proxy.
        return Socks5Connection(
            proxy_origin=self._proxy_url.origin,
            remote_origin=origin,
            proxy_auth=self._proxy_auth,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            network_backend=self._network_backend,
        )
+ )
188
+
189
+
190
class Socks5Connection(ConnectionInterface):
    """
    A single connection to `remote_origin`, tunnelled through a SOCKS5
    proxy at `proxy_origin`. The underlying HTTP/1.1 or HTTP/2 connection
    is established lazily on the first request.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        proxy_auth: typing.Optional[typing.Tuple[bytes, bytes]] = None,
        ssl_context: typing.Optional[ssl.SSLContext] = None,
        keepalive_expiry: typing.Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        network_backend: typing.Optional[NetworkBackend] = None,
    ) -> None:
        self._proxy_origin = proxy_origin
        self._remote_origin = remote_origin
        self._proxy_auth = proxy_auth
        self._ssl_context = ssl_context
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2

        self._network_backend: NetworkBackend = (
            SyncBackend() if network_backend is None else network_backend
        )
        # Serializes connection establishment across concurrent callers.
        self._connect_lock = Lock()
        # Lazily created on the first request; None until then.
        self._connection: typing.Optional[ConnectionInterface] = None
        self._connect_failed = False

    def handle_request(self, request: Request) -> Response:
        # Establish the tunnelled connection on first use, then delegate
        # the request to the underlying HTTP/1.1 or HTTP/2 connection.
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("connect", None)

        with self._connect_lock:
            if self._connection is None:
                try:
                    # Connect to the proxy
                    kwargs = {
                        "host": self._proxy_origin.host.decode("ascii"),
                        "port": self._proxy_origin.port,
                        "timeout": timeout,
                    }
                    with Trace("connect_tcp", logger, request, kwargs) as trace:
                        stream = self._network_backend.connect_tcp(**kwargs)
                        trace.return_value = stream

                    # Connect to the remote host using socks5
                    kwargs = {
                        "stream": stream,
                        "host": self._remote_origin.host.decode("ascii"),
                        "port": self._remote_origin.port,
                        "auth": self._proxy_auth,
                    }
                    with Trace(
                        "setup_socks5_connection", logger, request, kwargs
                    ) as trace:
                        _init_socks5_connection(**kwargs)
                        trace.return_value = stream

                    # Upgrade the stream to SSL
                    if self._remote_origin.scheme == b"https":
                        ssl_context = (
                            default_ssl_context()
                            if self._ssl_context is None
                            else self._ssl_context
                        )
                        # Offer "h2" via ALPN only when HTTP/2 is enabled.
                        alpn_protocols = (
                            ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                        )
                        ssl_context.set_alpn_protocols(alpn_protocols)

                        kwargs = {
                            "ssl_context": ssl_context,
                            "server_hostname": self._remote_origin.host.decode("ascii"),
                            "timeout": timeout,
                        }
                        with Trace("start_tls", logger, request, kwargs) as trace:
                            stream = stream.start_tls(**kwargs)
                            trace.return_value = stream

                    # Determine if we should be using HTTP/1.1 or HTTP/2
                    ssl_object = stream.get_extra_info("ssl_object")
                    http2_negotiated = (
                        ssl_object is not None
                        and ssl_object.selected_alpn_protocol() == "h2"
                    )

                    # Create the HTTP/1.1 or HTTP/2 connection
                    if http2_negotiated or (
                        self._http2 and not self._http1
                    ):  # pragma: nocover
                        # Imported lazily so the optional HTTP/2 support is
                        # only required when actually used.
                        from .http2 import HTTP2Connection

                        self._connection = HTTP2Connection(
                            origin=self._remote_origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
                    else:
                        self._connection = HTTP11Connection(
                            origin=self._remote_origin,
                            stream=stream,
                            keepalive_expiry=self._keepalive_expiry,
                        )
                except Exception as exc:
                    # Remember the failure so the state queries below
                    # (is_available, has_expired, ...) reflect it.
                    self._connect_failed = True
                    raise exc
            elif not self._connection.is_available():  # pragma: nocover
                raise ConnectionNotAvailable()

        return self._connection.handle_request(request)

    def can_handle_request(self, origin: Origin) -> bool:
        # This connection is pinned to a single remote origin.
        return origin == self._remote_origin

    def close(self) -> None:
        if self._connection is not None:
            self._connection.close()

    def is_available(self) -> bool:
        if self._connection is None:  # pragma: nocover
            # If HTTP/2 support is enabled, and the resulting connection could
            # end up as HTTP/2 then we should indicate the connection as being
            # available to service multiple requests.
            return (
                self._http2
                and (self._remote_origin.scheme == b"https" or not self._http1)
                and not self._connect_failed
            )
        return self._connection.is_available()

    def has_expired(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        if self._connection is None:  # pragma: nocover
            return self._connect_failed
        return self._connection.is_closed()

    def info(self) -> str:
        if self._connection is None:  # pragma: nocover
            return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
        return self._connection.info()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
evalkit_internvl/lib/python3.10/site-packages/httpcore/_synchronization.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading
2
+ from types import TracebackType
3
+ from typing import Optional, Type
4
+
5
+ import sniffio
6
+
7
+ from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions
8
+
9
+ # Our async synchronization primatives use either 'anyio' or 'trio' depending
10
+ # on if they're running under asyncio or trio.
11
+
12
+ try:
13
+ import trio
14
+ except ImportError: # pragma: nocover
15
+ trio = None # type: ignore
16
+
17
+ try:
18
+ import anyio
19
+ except ImportError: # pragma: nocover
20
+ anyio = None # type: ignore
21
+
22
+
23
class AsyncLock:
    """
    A mutual-exclusion lock usable under either trio or asyncio.

    The concrete lock object is created lazily, once we can detect which
    async library is driving the caller.
    """

    def __init__(self) -> None:
        self._backend = ""

    def setup(self) -> None:
        """
        Detect if we're running under 'asyncio' or 'trio' and create
        a lock with the correct implementation.
        """
        self._backend = sniffio.current_async_library()
        if self._backend == "trio":
            if trio is None:  # pragma: nocover
                # Fix: message previously read "Running under trio, requires
                # ..." — spurious comma, inconsistent with the sibling
                # classes in this module.
                raise RuntimeError(
                    "Running under trio requires the 'trio' package to be installed."
                )
            self._trio_lock = trio.Lock()
        else:
            if anyio is None:  # pragma: nocover
                raise RuntimeError(
                    "Running under asyncio requires the 'anyio' package to be installed."
                )
            self._anyio_lock = anyio.Lock()

    async def __aenter__(self) -> "AsyncLock":
        if not self._backend:
            self.setup()

        if self._backend == "trio":
            await self._trio_lock.acquire()
        else:
            await self._anyio_lock.acquire()

        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        if self._backend == "trio":
            self._trio_lock.release()
        else:
            self._anyio_lock.release()
67
+
68
+
69
class AsyncEvent:
    """
    A one-shot event usable under either trio or asyncio. The concrete
    event object is created lazily, once the running async library is
    known.
    """

    def __init__(self) -> None:
        self._backend = ""

    def setup(self) -> None:
        """
        Detect if we're running under 'asyncio' or 'trio' and create
        a lock with the correct implementation.
        """
        self._backend = sniffio.current_async_library()
        if self._backend == "trio":
            if trio is None:  # pragma: nocover
                raise RuntimeError(
                    "Running under trio requires the 'trio' package to be installed."
                )
            self._trio_event = trio.Event()
            return
        if anyio is None:  # pragma: nocover
            raise RuntimeError(
                "Running under asyncio requires the 'anyio' package to be installed."
            )
        self._anyio_event = anyio.Event()

    def set(self) -> None:
        if not self._backend:
            self.setup()

        backend_event = (
            self._trio_event if self._backend == "trio" else self._anyio_event
        )
        backend_event.set()

    async def wait(self, timeout: Optional[float] = None) -> None:
        if not self._backend:
            self.setup()

        if self._backend == "trio":
            if trio is None:  # pragma: nocover
                raise RuntimeError(
                    "Running under trio requires the 'trio' package to be installed."
                )

            # trio expects an explicit infinite deadline for "no timeout".
            trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout}
            deadline = float("inf") if timeout is None else timeout
            with map_exceptions(trio_exc_map):
                with trio.fail_after(deadline):
                    await self._trio_event.wait()
            return

        if anyio is None:  # pragma: nocover
            raise RuntimeError(
                "Running under asyncio requires the 'anyio' package to be installed."
            )

        anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout}
        with map_exceptions(anyio_exc_map):
            with anyio.fail_after(timeout):
                await self._anyio_event.wait()
126
+
127
+
128
class AsyncSemaphore:
    """
    A bounded semaphore usable under either trio or asyncio. The concrete
    semaphore is created lazily, once the running async library is known.
    """

    def __init__(self, bound: int) -> None:
        self._bound = bound
        self._backend = ""

    def setup(self) -> None:
        """
        Detect if we're running under 'asyncio' or 'trio' and create
        a semaphore with the correct implementation.
        """
        self._backend = sniffio.current_async_library()
        if self._backend == "trio":
            if trio is None:  # pragma: nocover
                raise RuntimeError(
                    "Running under trio requires the 'trio' package to be installed."
                )

            self._trio_semaphore = trio.Semaphore(
                initial_value=self._bound, max_value=self._bound
            )
            return

        if anyio is None:  # pragma: nocover
            raise RuntimeError(
                "Running under asyncio requires the 'anyio' package to be installed."
            )

        self._anyio_semaphore = anyio.Semaphore(
            initial_value=self._bound, max_value=self._bound
        )

    async def acquire(self) -> None:
        if not self._backend:
            self.setup()

        if self._backend == "trio":
            await self._trio_semaphore.acquire()
        else:
            await self._anyio_semaphore.acquire()

    async def release(self) -> None:
        # `release` only ever follows a successful `acquire`, so the
        # backend has been set up by this point.
        if self._backend == "trio":
            self._trio_semaphore.release()
        else:
            self._anyio_semaphore.release()
172
+
173
+
174
class AsyncShieldCancellation:
    # For certain portions of our codebase where we're dealing with
    # closing connections during exception handling we want to shield
    # the operation from being cancelled.
    #
    #     with AsyncShieldCancellation():
    #         ...  # clean-up operations, shielded from cancellation.

    def __init__(self) -> None:
        """
        Detect if we're running under 'asyncio' or 'trio' and create
        a shielded scope with the correct implementation.
        """
        self._backend = sniffio.current_async_library()

        if self._backend == "trio":
            if trio is None:  # pragma: nocover
                raise RuntimeError(
                    "Running under trio requires the 'trio' package to be installed."
                )

            self._trio_shield = trio.CancelScope(shield=True)
            return

        if anyio is None:  # pragma: nocover
            raise RuntimeError(
                "Running under asyncio requires the 'anyio' package to be installed."
            )

        self._anyio_shield = anyio.CancelScope(shield=True)

    def _scope(self):
        # The backend-specific shielded cancel scope chosen at init time.
        return self._trio_shield if self._backend == "trio" else self._anyio_shield

    def __enter__(self) -> "AsyncShieldCancellation":
        self._scope().__enter__()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        self._scope().__exit__(exc_type, exc_value, traceback)
221
+
222
+
223
+ # Our thread-based synchronization primitives...
224
+
225
+
226
class Lock:
    """
    A context-managed wrapper around `threading.Lock`, mirroring the
    interface of our async lock for the thread-synchronous codepath.
    """

    def __init__(self) -> None:
        self._lock = threading.Lock()

    def __enter__(self) -> "Lock":
        self._lock.acquire()
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        self._lock.release()
241
+
242
+
243
class Event:
    """
    A thread-synchronous event mirroring the interface of our async
    event, raising `PoolTimeout` if the wait times out.
    """

    def __init__(self) -> None:
        self._event = threading.Event()

    def set(self) -> None:
        self._event.set()

    def wait(self, timeout: Optional[float] = None) -> None:
        # `threading.Event.wait` returns False when the timeout elapses.
        if not self._event.wait(timeout=timeout):
            raise PoolTimeout()  # pragma: nocover
253
+
254
+
255
class Semaphore:
    """A thin wrapper around `threading.Semaphore` with a `bound` capacity."""

    def __init__(self, bound: int) -> None:
        # `bound` is both the initial and maximum number of holders.
        self._semaphore = threading.Semaphore(value=bound)

    def acquire(self) -> None:
        self._semaphore.acquire()

    def release(self) -> None:
        self._semaphore.release()
264
+
265
+
266
class ShieldCancellation:
    # Cancellation has no meaning for thread-synchronous code; this class
    # exists purely so the sync and async codepaths mirror one another.
    # It is a no-op context manager.
    def __enter__(self) -> "ShieldCancellation":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        pass
evalkit_internvl/lib/python3.10/site-packages/httpcore/_trace.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import logging
3
+ from types import TracebackType
4
+ from typing import Any, Dict, Optional, Type
5
+
6
+ from ._models import Request
7
+
8
+
9
class Trace:
    """
    Context manager that emits debug-level log records and optional
    per-request trace callbacks around a named operation.

    Tracing fires when DEBUG logging is enabled on `logger`, or when the
    request carries a "trace" extension callback. Events are named
    "<prefix>.<name>.started" / ".complete" / ".failed", where the prefix
    is the final dotted component of the logger's name.
    """

    def __init__(
        self,
        name: str,
        logger: logging.Logger,
        request: Optional[Request] = None,
        kwargs: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.name = name
        self.logger = logger
        # Optional per-request callback supplied via the "trace" extension.
        self.trace_extension = (
            None if request is None else request.extensions.get("trace")
        )
        self.debug = self.logger.isEnabledFor(logging.DEBUG)
        self.kwargs = kwargs or {}
        # Set by the caller inside the `with` block; reported on completion.
        self.return_value: Any = None
        self.should_trace = self.debug or self.trace_extension is not None
        # e.g. "httpcore.connection" -> "connection".
        self.prefix = self.logger.name.split(".")[-1]

    def trace(self, name: str, info: Dict[str, Any]) -> None:
        # Dispatch one trace event: first to the extension callback (if
        # any), then to the debug log (if enabled).
        if self.trace_extension is not None:
            prefix_and_name = f"{self.prefix}.{name}"
            ret = self.trace_extension(prefix_and_name, info)
            # Guard against an async callback passed to the sync interface.
            if inspect.iscoroutine(ret):  # pragma: no cover
                raise TypeError(
                    "If you are using a synchronous interface, "
                    "the callback of the `trace` extension should "
                    "be a normal function instead of an asynchronous function."
                )

        if self.debug:
            # Omit a trailing "return_value=None" from the log message.
            if not info or "return_value" in info and info["return_value"] is None:
                message = name
            else:
                args = " ".join([f"{key}={value!r}" for key, value in info.items()])
                message = f"{name} {args}"
            self.logger.debug(message)

    def __enter__(self) -> "Trace":
        if self.should_trace:
            info = self.kwargs
            self.trace(f"{self.name}.started", info)
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        if self.should_trace:
            if exc_value is None:
                info = {"return_value": self.return_value}
                self.trace(f"{self.name}.complete", info)
            else:
                info = {"exception": exc_value}
                self.trace(f"{self.name}.failed", info)

    async def atrace(self, name: str, info: Dict[str, Any]) -> None:
        # Async variant of `trace`; the extension callback must be async.
        if self.trace_extension is not None:
            prefix_and_name = f"{self.prefix}.{name}"
            coro = self.trace_extension(prefix_and_name, info)
            if not inspect.iscoroutine(coro):  # pragma: no cover
                raise TypeError(
                    "If you're using an asynchronous interface, "
                    "the callback of the `trace` extension should "
                    "be an asynchronous function rather than a normal function."
                )
            await coro

        if self.debug:
            # Omit a trailing "return_value=None" from the log message.
            if not info or "return_value" in info and info["return_value"] is None:
                message = name
            else:
                args = " ".join([f"{key}={value!r}" for key, value in info.items()])
                message = f"{name} {args}"
            self.logger.debug(message)

    async def __aenter__(self) -> "Trace":
        if self.should_trace:
            info = self.kwargs
            await self.atrace(f"{self.name}.started", info)
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        if self.should_trace:
            if exc_value is None:
                info = {"return_value": self.return_value}
                await self.atrace(f"{self.name}.complete", info)
            else:
                info = {"exception": exc_value}
                await self.atrace(f"{self.name}.failed", info)
evalkit_internvl/lib/python3.10/site-packages/httpcore/_utils.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import select
2
+ import socket
3
+ import sys
4
+ import typing
5
+
6
+
7
def is_socket_readable(sock: typing.Optional[socket.socket]) -> bool:
    """
    Return whether a socket has data ready to be read.

    "Readable" means the receive buffer is non-empty, so calling `.recv()`
    would return some data immediately rather than block.
    """
    # We only *check* readability here; attempting an actual read could
    # block forever if no data is pending.

    # A socket that no longer exists, or that cannot supply a file
    # descriptor, is reported as readable — the next read operation on it
    # is ready to return the terminating `b""`.
    fd = None if sock is None else sock.fileno()
    if fd is None or fd < 0:  # pragma: nocover
        return True

    # The implementation below was stolen from:
    # https://github.com/python-trio/trio/blob/20ee2b1b7376db637435d80e266212a35837ddcc/trio/_socket.py#L471-L478
    # See also: https://github.com/encode/httpcore/pull/193#issuecomment-703129316

    # `select.poll` is unavailable on Windows, and may be missing
    # elsewhere too (e.g. when eventlet patches `select`; see #327), in
    # which case we fall back to `select.select`.
    if (
        sys.platform == "win32" or getattr(select, "poll", None) is None
    ):  # pragma: nocover
        readable, _, _ = select.select([fd], [], [], 0)
        return bool(readable)
    poller = select.poll()
    poller.register(fd, select.POLLIN)
    return bool(poller.poll(0))
evalkit_internvl/lib/python3.10/site-packages/httpcore/py.typed ADDED
File without changes
evalkit_internvl/lib/python3.10/site-packages/pexpect/ANSI.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''This implements an ANSI (VT100) terminal emulator as a subclass of screen.
2
+
3
+ PEXPECT LICENSE
4
+
5
+ This license is approved by the OSI and FSF as GPL-compatible.
6
+ http://opensource.org/licenses/isc-license.txt
7
+
8
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
9
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
10
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
11
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
12
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
+
20
+ '''
21
+
22
+ # references:
23
+ # http://en.wikipedia.org/wiki/ANSI_escape_code
24
+ # http://www.retards.org/terminals/vt102.html
25
+ # http://vt100.net/docs/vt102-ug/contents.html
26
+ # http://vt100.net/docs/vt220-rm/
27
+ # http://www.termsys.demon.co.uk/vtansi.htm
28
+
29
+ from . import screen
30
+ from . import FSM
31
+ import string
32
+
33
+ #
34
+ # The 'Do.*' functions are helper functions for the ANSI class.
35
+ #
36
def DoEmit (fsm):
    '''Write the current input character to the screen at the cursor.'''
    fsm.memory[0].write_ch(fsm.input_symbol)

def DoStartNumber (fsm):
    '''Push the first digit of a numeric escape parameter onto the stack.'''
    fsm.memory.append(fsm.input_symbol)

def DoBuildNumber (fsm):
    '''Append the current digit to the numeric parameter on top of the stack.'''
    fsm.memory[-1] = fsm.memory[-1] + fsm.input_symbol
50
+
51
def DoBackOne (fsm):
    '''Move the cursor left by one column.'''
    fsm.memory[0].cursor_back()

def DoBack (fsm):
    '''Move the cursor left by the numeric parameter popped from the stack.'''
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_back(n)

def DoDownOne (fsm):
    '''Move the cursor down by one row.'''
    fsm.memory[0].cursor_down()

def DoDown (fsm):
    '''Move the cursor down by the numeric parameter popped from the stack.'''
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_down(n)

def DoForwardOne (fsm):
    '''Move the cursor right by one column.'''
    fsm.memory[0].cursor_forward()

def DoForward (fsm):
    '''Move the cursor right by the numeric parameter popped from the stack.'''
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_forward(n)

def DoUpReverse (fsm):
    '''Move the cursor up one row (reverse index).'''
    fsm.memory[0].cursor_up_reverse()

def DoUpOne (fsm):
    '''Move the cursor up by one row.'''
    fsm.memory[0].cursor_up()

def DoUp (fsm):
    '''Move the cursor up by the numeric parameter popped from the stack.'''
    n = int(fsm.memory.pop())
    fsm.memory[0].cursor_up(n)
99
+
100
def DoHome (fsm):
    '''Move the cursor to the row/column given by the two stacked
    parameters (column is on top of the stack).'''
    col = int(fsm.memory.pop())
    row = int(fsm.memory.pop())
    fsm.memory[0].cursor_home(row, col)

def DoHomeOrigin (fsm):
    '''Move the cursor to the screen origin (row 1, column 1).'''
    fsm.memory[0].cursor_home(1, 1)
113
+
114
def DoEraseDown (fsm):
    '''Erase from the cursor to the end of the screen.'''
    fsm.memory[0].erase_down()

def DoErase (fsm):
    '''Erase part or all of the screen, selected by the stacked parameter:
    0 = cursor to end, 1 = start to cursor, 2 = whole screen.'''
    mode = int(fsm.memory.pop())
    scr = fsm.memory[0]
    if mode == 0:
        scr.erase_down()
    elif mode == 1:
        scr.erase_up()
    elif mode == 2:
        scr.erase_screen()

def DoEraseEndOfLine (fsm):
    '''Erase from the cursor to the end of the current line.'''
    fsm.memory[0].erase_end_of_line()

def DoEraseLine (fsm):
    '''Erase part or all of the current line, selected by the stacked
    parameter: 0 = cursor to end, 1 = start to cursor, 2 = whole line.'''
    mode = int(fsm.memory.pop())
    scr = fsm.memory[0]
    if mode == 0:
        scr.erase_end_of_line()
    elif mode == 1:
        scr.erase_start_of_line()
    elif mode == 2:
        scr.erase_line()
145
+
146
def DoEnableScroll (fsm):
    '''Enable scrolling for the entire screen.'''
    fsm.memory[0].scroll_screen()

def DoCursorSave (fsm):
    '''Save the cursor position and rendition attributes.'''
    fsm.memory[0].cursor_save_attrs()

def DoCursorRestore (fsm):
    '''Restore the previously saved cursor position and attributes.'''
    fsm.memory[0].cursor_restore_attrs()

def DoScrollRegion (fsm):
    '''Restrict scrolling to the row range given by the two stacked
    parameters (bottom row is on top of the stack).'''
    bottom = int(fsm.memory.pop())
    top = int(fsm.memory.pop())
    fsm.memory[0].scroll_screen_rows(top, bottom)

def DoMode (fsm):
    '''Consume a terminal mode parameter (typically 4); currently a no-op.'''
    fsm.memory.pop()
    # fsm.memory[0].setReplaceMode()
173
+
174
def DoLog (fsm):
    '''Fallback handler for unrecognized input: reset the parameter stack
    (keeping only the screen at index 0) and append the offending symbol
    and state to a file named "log" in the current directory.'''
    fsm.memory = fsm.memory[:1]
    with open('log', 'a') as fout:
        fout.write(fsm.input_symbol + ',' + fsm.current_state + '\n')
181
+
182
class term (screen.screen):

    '''This class is an abstract, generic terminal.
    This does nothing. This is a placeholder that
    provides a common base class for other terminals
    such as an ANSI terminal. '''

    def __init__ (self, r=24, c=80, *args, **kwargs):
        # Nothing beyond the base screen construction: *r* rows by *c* columns.
        screen.screen.__init__(self, r,c,*args,**kwargs)
192
+
193
class ANSI (term):
    '''This class implements an ANSI (VT100) terminal.
    It is a stream filter that recognizes ANSI terminal
    escape sequences and maintains the state of a screen object. '''

    def __init__ (self, r=24,c=80,*args,**kwargs):
        # Build the escape-sequence parser as a finite state machine.  The
        # FSM memory stack holds this screen object at index 0; numeric
        # escape parameters are pushed on top of it as they are parsed.
        term.__init__(self,r,c,*args,**kwargs)

        #self.screen = screen (24,80)
        self.state = FSM.FSM ('INIT',[self])
        # Any input that no rule handles falls through to DoLog.
        self.state.set_default_transition (DoLog, 'INIT')
        # Plain characters in INIT are written straight to the screen.
        self.state.add_transition_any ('INIT', DoEmit, 'INIT')
        self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
        self.state.add_transition_any ('ESC', DoLog, 'INIT')
        # Character-set selection sequences (ESC ( x / ESC ) x) are consumed.
        self.state.add_transition ('(', 'ESC', None, 'G0SCS')
        self.state.add_transition (')', 'ESC', None, 'G1SCS')
        self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
        self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
        self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
        self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
        self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
        self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
        self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
        self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
        self.state.add_transition ('[', 'ESC', None, 'ELB')
        # ELB means Escape Left Bracket. That is ^[[
        # Parameterless CSI sequences:
        self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
        self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
        self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
        self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
        self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
        self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
        self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
        self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
        self.state.add_transition ('m', 'ELB', self.do_sgr, 'INIT')
        self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
        # One-parameter CSI sequences: digits accumulate in NUMBER_1.
        self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
        self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
        self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
        self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
        self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
        self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
        self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
        self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
        self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
        ### It gets worse... the 'm' code can have infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_1', self.do_sgr, 'INIT')
        ### LED control. Same implementation problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_1', self.do_decsca, 'INIT')

        # \E[?47h switch to alternate screen
        # \E[?47l restores to normal screen from alternate screen.
        self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
        self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
        self.state.add_transition ('l', 'MODECRAP_NUM', self.do_modecrap, 'INIT')
        self.state.add_transition ('h', 'MODECRAP_NUM', self.do_modecrap, 'INIT')

        #RM Reset Mode Esc [ Ps l none
        # Two-parameter CSI sequences: number;number accumulate in NUMBER_2.
        self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
        self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
        self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
        self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
        self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
        self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
        self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
        ### It gets worse... the 'm' code can have infinite number of
        ### number;number;number before it. I've never seen more than two,
        ### but the specs say it's allowed. crap!
        self.state.add_transition ('m', 'NUMBER_2', self.do_sgr, 'INIT')
        ### LED control. Same problem as 'm' code.
        self.state.add_transition ('q', 'NUMBER_2', self.do_decsca, 'INIT')
        self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')

        # Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
        self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
        self.state.add_transition_list (string.digits, 'SEMICOLON_X', DoStartNumber, 'NUMBER_X')
        self.state.add_transition_list (string.digits, 'NUMBER_X', DoBuildNumber, 'NUMBER_X')
        self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
        self.state.add_transition ('m', 'NUMBER_X', self.do_sgr, 'INIT')
        self.state.add_transition ('q', 'NUMBER_X', self.do_decsca, 'INIT')
        self.state.add_transition (';', 'NUMBER_X', None, 'SEMICOLON_X')

    def process (self, c):
        """Process a single character. Called by :meth:`write`."""
        if isinstance(c, bytes):
            c = self._decode(c)
        self.state.process(c)

    def process_list (self, l):
        # Historical alias: delegates to write(), which iterates characters.
        self.write(l)

    def write (self, s):
        """Process text, writing it to the virtual screen while handling
        ANSI escape codes.
        """
        if isinstance(s, bytes):
            s = self._decode(s)
        for c in s:
            self.process(c)

    def flush (self):
        # File-like no-op: nothing is buffered at this layer.
        pass

    def write_ch (self, ch):
        '''This puts a character at the current cursor position. The cursor
        position is moved forward with wrap-around, but no scrolling is done if
        the cursor hits the lower-right corner of the screen. '''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        #\r and \n both produce a call to cr() and lf(), respectively.
        ch = ch[0]

        if ch == u'\r':
            self.cr()
            return
        if ch == u'\n':
            self.crlf()
            return
        if ch == chr(screen.BS):
            self.cursor_back()
            return
        self.put_abs(self.cur_r, self.cur_c, ch)
        old_r = self.cur_r
        old_c = self.cur_c
        self.cursor_forward()
        if old_c == self.cur_c:
            # Cursor could not advance: we hit the right margin, so wrap.
            self.cursor_down()
            if old_r != self.cur_r:
                # Wrapped onto the next row; continue from column 1.
                self.cursor_home (self.cur_r, 1)
            else:
                # Already on the bottom row: scroll and clear a fresh line.
                self.scroll_up ()
                self.cursor_home (self.cur_r, 1)
                self.erase_line()

    def do_sgr (self, fsm):
        '''Select Graphic Rendition, e.g. color. '''
        # Rendition attributes are not implemented; discard the parameters,
        # keeping only the screen at the bottom of the memory stack.
        screen = fsm.memory[0]
        fsm.memory = [screen]

    def do_decsca (self, fsm):
        '''Select character protection attribute. '''
        # Not implemented; discard the parameters as in do_sgr().
        screen = fsm.memory[0]
        fsm.memory = [screen]

    def do_modecrap (self, fsm):
        '''Handler for \x1b[?<number>h and \x1b[?<number>l. If anyone
        wanted to actually use these, they'd need to add more states to the
        FSM rather than just improve or override this method. '''
        # Not implemented; discard the parameters as in do_sgr().
        screen = fsm.memory[0]
        fsm.memory = [screen]
evalkit_internvl/lib/python3.10/site-packages/pexpect/FSM.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ '''This module implements a Finite State Machine (FSM). In addition to state
4
+ this FSM also maintains a user defined "memory". So this FSM can be used as a
5
+ Push-down Automata (PDA) since a PDA is a FSM + memory.
6
+
7
+ The following describes how the FSM works, but you will probably also need to
8
+ see the example function to understand how the FSM is used in practice.
9
+
10
+ You define an FSM by building tables of transitions. For a given input symbol
11
+ the process() method uses these tables to decide what action to call and what
12
+ the next state will be. The FSM has a table of transitions that associate:
13
+
14
+ (input_symbol, current_state) --> (action, next_state)
15
+
16
+ Where "action" is a function you define. The symbols and states can be any
17
+ objects. You use the add_transition() and add_transition_list() methods to add
18
+ to the transition table. The FSM also has a table of transitions that
19
+ associate:
20
+
21
+ (current_state) --> (action, next_state)
22
+
23
+ You use the add_transition_any() method to add to this transition table. The
24
+ FSM also has one default transition that is not associated with any specific
25
+ input_symbol or state. You use the set_default_transition() method to set the
26
+ default transition.
27
+
28
+ When an action function is called it is passed a reference to the FSM. The
29
+ action function may then access attributes of the FSM such as input_symbol,
30
+ current_state, or "memory". The "memory" attribute can be any object that you
31
+ want to pass along to the action functions. It is not used by the FSM itself.
32
+ For parsing you would typically pass a list to be used as a stack.
33
+
34
+ The processing sequence is as follows. The process() method is given an
35
+ input_symbol to process. The FSM will search the table of transitions that
36
+ associate:
37
+
38
+ (input_symbol, current_state) --> (action, next_state)
39
+
40
+ If the pair (input_symbol, current_state) is found then process() will call the
41
+ associated action function and then set the current state to the next_state.
42
+
43
+ If the FSM cannot find a match for (input_symbol, current_state) it will then
44
+ search the table of transitions that associate:
45
+
46
+ (current_state) --> (action, next_state)
47
+
48
+ If the current_state is found then the process() method will call the
49
+ associated action function and then set the current state to the next_state.
50
+ Notice that this table lacks an input_symbol. It lets you define transitions
51
+ for a current_state and ANY input_symbol. Hence, it is called the "any" table.
52
+ Remember, it is always checked after first searching the table for a specific
53
+ (input_symbol, current_state).
54
+
55
+ For the case where the FSM did not match either of the previous two cases the
56
+ FSM will try to use the default transition. If the default transition is
57
+ defined then the process() method will call the associated action function and
58
+ then set the current state to the next_state. This lets you define a default
59
+ transition as a catch-all case. You can think of it as an exception handler.
60
+ There can be only one default transition.
61
+
62
+ Finally, if none of the previous cases are defined for an input_symbol and
63
+ current_state then the FSM will raise an exception. This may be desirable, but
64
+ you can always prevent this just by defining a default transition.
65
+
66
+ Noah Spurrier 20020822
67
+
68
+ PEXPECT LICENSE
69
+
70
+ This license is approved by the OSI and FSF as GPL-compatible.
71
+ http://opensource.org/licenses/isc-license.txt
72
+
73
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
74
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
75
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
76
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
77
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
78
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
79
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
80
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
81
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
82
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
83
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
84
+
85
+ '''
86
+
87
class ExceptionFSM(Exception):

    '''Exception raised by the FSM, e.g. for an undefined transition.'''

    def __init__(self, value):
        # Keep the original payload available to handlers.
        self.value = value

    def __str__(self):
        return 'ExceptionFSM: %s' % (self.value,)
96
+
97
class FSM:

    '''A Finite State Machine (FSM) with a user-supplied "memory" object,
    which makes it usable as a push-down automaton (PDA = FSM + memory).

    Transitions are looked up from most to least specific: an exact
    (input_symbol, state) pair, then an any-input transition for the
    state, then the single default transition.
    '''

    def __init__(self, initial_state, memory=None):

        '''Create the FSM in *initial_state*.  *memory* is any object you
        want passed along to the action functions; the FSM itself never
        touches it (for parsing, a list used as a stack works well).'''

        # Map (input_symbol, current_state) --> (action, next_state).
        self.state_transitions = {}
        # Map current_state --> (action, next_state), matched on any input.
        self.state_transitions_any = {}
        self.default_transition = None

        self.input_symbol = None
        self.initial_state = initial_state
        self.current_state = self.initial_state
        self.next_state = None
        self.action = None
        self.memory = memory

    def reset (self):

        '''Return to the initial state (as set by the constructor) and
        clear the last input symbol.'''

        self.current_state = self.initial_state
        self.input_symbol = None

    def add_transition (self, input_symbol, state, action=None, next_state=None):

        '''Register (input_symbol, state) --> (action, next_state).

        A None *action* means "change state only"; a None *next_state*
        keeps the machine in *state*.  Use add_transition_list() to
        register the same transition for several symbols at once.'''

        resolved = state if next_state is None else next_state
        self.state_transitions[(input_symbol, state)] = (action, resolved)

    def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):

        '''Register the same transition for every symbol in
        *list_input_symbols* (a list or a string; string.digits,
        string.whitespace, etc. are handy character classes).

        None *action* / *next_state* behave as in add_transition().'''

        for symbol in list_input_symbols:
            self.add_transition(symbol, state, action, next_state)

    def add_transition_any (self, state, action=None, next_state=None):

        '''Register state --> (action, next_state), matching ANY input
        symbol while in *state*.  These are consulted only after exact
        (input_symbol, state) matches fail.

        None *action* / *next_state* behave as in add_transition().'''

        resolved = state if next_state is None else next_state
        self.state_transitions_any[state] = (action, resolved)

    def set_default_transition (self, action, next_state):

        '''Set the catch-all (action, next_state) used when neither the
        exact nor the any-input tables match — a final fall-through for
        errors and undefined states.

        Remove it again by assigning None to the default_transition
        attribute.'''

        self.default_transition = (action, next_state)

    def get_transition (self, input_symbol, state):

        '''Return (action, next_state) for *input_symbol* in *state*
        without modifying the FSM (no side effects; normally called only
        by process()).

        Lookup order, most to least specific:

        1. state_transitions[(input_symbol, state)]
        2. state_transitions_any[state]
        3. the default transition, if defined
        4. otherwise raise ExceptionFSM
        '''

        exact = self.state_transitions.get((input_symbol, state))
        if exact is not None:
            return exact
        for_state = self.state_transitions_any.get(state)
        if for_state is not None:
            return for_state
        if self.default_transition is not None:
            return self.default_transition
        raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
            (str(input_symbol), str(state)) )

    def process (self, input_symbol):

        '''Feed one complete input symbol to the machine: look up the
        transition via get_transition(), run its action (if any, with this
        FSM as the argument), then move to the next state.  Use
        process_list() to feed a sequence of symbols.'''

        self.input_symbol = input_symbol
        self.action, self.next_state = self.get_transition(
            self.input_symbol, self.current_state)
        if self.action is not None:
            self.action(self)
        self.current_state = self.next_state
        self.next_state = None

    def process_list (self, input_symbols):

        '''Feed each element of *input_symbols* (a string or any iterable)
        to process().'''

        for symbol in input_symbols:
            self.process(symbol)
+ self.process (s)
252
+
253
+ ##############################################################################
254
+ # The following is an example that demonstrates the use of the FSM class to
255
+ # process an RPN expression. Run this module from the command line. You will
256
+ # get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
257
+ # Operators are * / + - Use the = sign to evaluate and print the expression.
258
+ # For example:
259
+ #
260
+ # 167 3 2 2 * * * 1 - =
261
+ #
262
+ # will print:
263
+ #
264
+ # 2003
265
+ ##############################################################################
266
+
267
+ import sys
268
+ import string
269
+
270
+ PY3 = (sys.version_info[0] >= 3)
271
+
272
+ #
273
+ # These define the actions.
274
+ # Note that "memory" is a list being used as a stack.
275
+ #
276
+
277
def BeginBuildNumber (fsm):
    '''Start a new number literal on the stack from the current digit.'''
    fsm.memory.append(fsm.input_symbol)

def BuildNumber (fsm):
    '''Extend the number literal on top of the stack with the current digit.'''
    fsm.memory[-1] = fsm.memory[-1] + fsm.input_symbol

def EndBuildNumber (fsm):
    '''Replace the finished number literal on top of the stack with its int value.'''
    fsm.memory.append(int(fsm.memory.pop()))
+
289
def DoOperator (fsm):
    '''Pop two operands and push the result of applying the current
    operator symbol (+ - * /) to them.'''
    right = fsm.memory.pop()
    left = fsm.memory.pop()
    op = fsm.input_symbol
    if op == '+':
        fsm.memory.append(left + right)
    elif op == '-':
        fsm.memory.append(left - right)
    elif op == '*':
        fsm.memory.append(left * right)
    elif op == '/':
        fsm.memory.append(left / right)
+
301
def DoEqual (fsm):
    '''Pop the evaluated result off the stack and print it.'''
    print(fsm.memory.pop())

def Error (fsm):
    '''Default transition: report an input symbol that made no sense.'''
    print('That does not compute.')
    print(str(fsm.input_symbol))
+
308
def main():

    '''This is where the example starts and the FSM state transitions are
    defined. Note that states are strings (such as 'INIT'). This is not
    necessary, but it makes the example easier to read.

    Builds an interactive RPN calculator: digits accumulate into numbers,
    whitespace terminates a number, + - * / apply an operator to the top
    two stack entries, and = pops and prints the result.
    '''

    # The FSM memory (here an empty list) serves as the operand stack.
    f = FSM ('INIT', [])
    f.set_default_transition (Error, 'INIT')
    f.add_transition_any ('INIT', None, 'INIT')
    f.add_transition ('=', 'INIT', DoEqual, 'INIT')
    f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
    f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
    f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
    f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')

    print()
    print('Enter an RPN Expression.')
    print('Numbers may be integers. Operators are * / + -')
    print('Use the = sign to evaluate and print the expression.')
    print('For example: ')
    print(' 167 3 2 2 * * * 1 - =')
    # raw_input only exists on Python 2; PY3 selects the right builtin.
    inputstr = (input if PY3 else raw_input)('> ') # analysis:ignore
    f.process_list(inputstr)
331
+
332
+
333
+ if __name__ == '__main__':
334
+ main()
evalkit_internvl/lib/python3.10/site-packages/pexpect/__init__.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''Pexpect is a Python module for spawning child applications and controlling
2
+ them automatically. Pexpect can be used for automating interactive applications
3
+ such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
4
+ scripts for duplicating software package installations on different servers. It
5
+ can be used for automated software testing. Pexpect is in the spirit of Don
6
+ Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
7
+ require TCL and Expect or require C extensions to be compiled. Pexpect does not
8
+ use C, Expect, or TCL extensions. It should work on any platform that supports
9
+ the standard Python pty module. The Pexpect interface focuses on ease of use so
10
+ that simple tasks are easy.
11
+
12
+ There are two main interfaces to the Pexpect system; these are the function,
13
+ run() and the class, spawn. The spawn class is more powerful. The run()
14
+ function is simpler than spawn, and is good for quickly calling program. When
15
+ you call the run() function it executes a given program and then returns the
16
+ output. This is a handy replacement for os.system().
17
+
18
+ For example::
19
+
20
+ pexpect.run('ls -la')
21
+
22
+ The spawn class is the more powerful interface to the Pexpect system. You can
23
+ use this to spawn a child program then interact with it by sending input and
24
+ expecting responses (waiting for patterns in the child's output).
25
+
26
+ For example::
27
+
28
+ child = pexpect.spawn('scp foo user@example.com:.')
29
+ child.expect('Password:')
30
+ child.sendline(mypassword)
31
+
32
+ Context manager can be used for the spawn() function::
33
+
34
+ with pexpect.spawn('scp foo user@example.com:.') as child:
35
+ child.expect('Password:')
36
+ child.sendline(mypassword)
37
+
38
+ This works even for commands that ask for passwords or other input outside of
39
+ the normal stdio streams. For example, ssh reads input directly from the TTY
40
+ device which bypasses stdin.
41
+
42
+ Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
43
+ Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
44
+ vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
45
+ Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
46
+ Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
47
+ Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
48
+ Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
49
+
50
+ Pexpect is free, open source, and all that good stuff.
51
+ http://pexpect.sourceforge.net/
52
+
53
+ PEXPECT LICENSE
54
+
55
+ This license is approved by the OSI and FSF as GPL-compatible.
56
+ http://opensource.org/licenses/isc-license.txt
57
+
58
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
59
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
60
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
61
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
62
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
63
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
64
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
65
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
66
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
67
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
68
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
69
+
70
+ '''
71
+
72
+ import sys
73
+ PY3 = (sys.version_info[0] >= 3)
74
+
75
+ from .exceptions import ExceptionPexpect, EOF, TIMEOUT
76
+ from .utils import split_command_line, which, is_executable_file
77
+ from .expect import Expecter, searcher_re, searcher_string
78
+
79
+ if sys.platform != 'win32':
80
+ # On Unix, these are available at the top level for backwards compatibility
81
+ from .pty_spawn import spawn, spawnu
82
+ from .run import run, runu
83
+
84
+ __version__ = '4.9.0'
85
+ __revision__ = ''
86
+ __all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
87
+ 'which', 'split_command_line', '__version__', '__revision__']
88
+
89
+
90
+
91
+ # vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent :
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/ANSI.cpython-310.pyc ADDED
Binary file (9.71 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/FSM.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async_pre_await.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async_w_await.cpython-310.pyc ADDED
Binary file (3.64 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/pty_spawn.cpython-310.pyc ADDED
Binary file (32.1 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/pxssh.cpython-310.pyc ADDED
Binary file (16.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/replwrap.cpython-310.pyc ADDED
Binary file (5 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/screen.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/spawnbase.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/pexpect/_async.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Facade that provides the coroutine implementation matching the running Python.

Python 3.5 introduced the ``async def``/``await`` syntax. In later versions
the older generator-based coroutines (and some loop-lookup helpers) were
deprecated and eventually removed.

For Python 3.6 and newer, the ``async def``/``await`` implementations are
imported; otherwise the generator-based fallbacks are used. Either way this
module re-exports the same three names, so callers never need to care.
"""
# pylint: disable=unused-import
# flake8: noqa: F401
from sys import version_info as py_version_info

# async def/await is assumed to be the more stable of the two implementations.
if py_version_info >= (3, 6):
    from pexpect._async_w_await import (
        PatternWaiter,
        expect_async,
        repl_run_command_async,
    )
else:
    from pexpect._async_pre_await import (
        PatternWaiter,
        expect_async,
        repl_run_command_async,
    )
evalkit_internvl/lib/python3.10/site-packages/pexpect/_async_pre_await.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Implementation of coroutines without using ``async def``/``await`` keywords.

``@asyncio.coroutine`` and ``yield from`` are used here instead, for Python
versions that predate the newer syntax.
"""
import asyncio
import errno
import signal

from pexpect import EOF


@asyncio.coroutine
def expect_async(expecter, timeout=None):
    """Asynchronous counterpart of Expecter.expect_loop()."""
    # Data already buffered may satisfy the pattern; if so we never need to
    # touch the event loop at all.
    idx = expecter.existing_data()
    if idx is not None:
        return idx
    if not expecter.spawn.async_pw_transport:
        waiter = PatternWaiter()
        waiter.set_expecter(expecter)
        # NOTE: the lambda closes over `waiter` late; connect_read_pipe calls
        # it before the reassignment below, so it sees the instance above.
        transport, waiter = yield from asyncio.get_event_loop().connect_read_pipe(
            lambda: waiter, expecter.spawn
        )
        expecter.spawn.async_pw_transport = waiter, transport
    else:
        # Reuse the protocol/transport pair cached on the spawn object.
        waiter, transport = expecter.spawn.async_pw_transport
        waiter.set_expecter(expecter)
        transport.resume_reading()
    try:
        return (yield from asyncio.wait_for(waiter.fut, timeout))
    except asyncio.TimeoutError as exc:
        transport.pause_reading()
        return expecter.timeout(exc)


@asyncio.coroutine
def repl_run_command_async(repl, cmdlines, timeout=-1):
    """Send a multi-line command to a REPL and collect its output."""
    chunks = []
    repl.child.sendline(cmdlines[0])
    for line in cmdlines[1:]:
        yield from repl._expect_prompt(timeout=timeout, async_=True)
        chunks.append(repl.child.before)
        repl.child.sendline(line)

    # Command fully submitted; wait for the next prompt.
    prompt_idx = yield from repl._expect_prompt(timeout=timeout, async_=True)
    if prompt_idx == 1:
        # Continuation prompt means the input was incomplete: interrupt it.
        repl.child.kill(signal.SIGINT)
        yield from repl._expect_prompt(timeout=1, async_=True)
        raise ValueError("Continuation prompt found - input was incomplete:")
    return "".join(chunks + [repl.child.before])


class PatternWaiter(asyncio.Protocol):
    """asyncio protocol that resolves a future when an expecter matches."""

    transport = None

    def set_expecter(self, expecter):
        self.expecter = expecter
        self.fut = asyncio.Future()

    def found(self, result):
        if not self.fut.done():
            self.fut.set_result(result)
            self.transport.pause_reading()

    def error(self, exc):
        if not self.fut.done():
            self.fut.set_exception(exc)
            self.transport.pause_reading()

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        spawn = self.expecter.spawn
        text = spawn._decoder.decode(data)
        spawn._log(text, "read")

        if self.fut.done():
            # Match already delivered; just accumulate the extra output.
            spawn._before.write(text)
            spawn._buffer.write(text)
            return

        try:
            index = self.expecter.new_data(text)
            if index is not None:
                # Found a match
                self.found(index)
        except Exception as exc:
            self.expecter.errored()
            self.error(exc)

    def eof_received(self):
        # N.B. If this gets called, async will close the pipe (the spawn
        # object) for us.
        try:
            self.expecter.spawn.flag_eof = True
            index = self.expecter.eof()
        except EOF as exc:
            self.error(exc)
        else:
            self.found(index)

    def connection_lost(self, exc):
        if isinstance(exc, OSError) and exc.errno == errno.EIO:
            # We may get here without eof_received being called, e.g on Linux
            self.eof_received()
        elif exc is not None:
            self.error(exc)
evalkit_internvl/lib/python3.10/site-packages/pexpect/_async_w_await.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Implementation of coroutines using ``async def``/``await`` keywords.

These keywords replaced ``@asyncio.coroutine`` and ``yield from`` from
Python 3.5 onwards.
"""
import asyncio
import errno
import signal
from sys import version_info as py_version_info

from pexpect import EOF

if py_version_info >= (3, 7):
    # get_running_loop, new in 3.7, is preferred to get_event_loop
    _loop_getter = asyncio.get_running_loop
else:
    # get_event_loop emits a deprecation warning since 3.10
    _loop_getter = asyncio.get_event_loop


async def expect_async(expecter, timeout=None):
    """Asynchronous counterpart of Expecter.expect_loop()."""
    # Data already buffered may satisfy the pattern; check before touching
    # the event loop at all.
    idx = expecter.existing_data()
    if idx is not None:
        return idx
    if not expecter.spawn.async_pw_transport:
        waiter = PatternWaiter()
        waiter.set_expecter(expecter)
        # NOTE: the lambda closes over `waiter` late; connect_read_pipe calls
        # it before the reassignment below, so it sees the instance above.
        transport, waiter = await _loop_getter().connect_read_pipe(
            lambda: waiter, expecter.spawn
        )
        expecter.spawn.async_pw_transport = waiter, transport
    else:
        # Reuse the protocol/transport pair cached on the spawn object.
        waiter, transport = expecter.spawn.async_pw_transport
        waiter.set_expecter(expecter)
        transport.resume_reading()
    try:
        return await asyncio.wait_for(waiter.fut, timeout)
    except asyncio.TimeoutError as exc:
        transport.pause_reading()
        return expecter.timeout(exc)


async def repl_run_command_async(repl, cmdlines, timeout=-1):
    """Send a multi-line command to a REPL and collect its output."""
    chunks = []
    repl.child.sendline(cmdlines[0])
    for line in cmdlines[1:]:
        await repl._expect_prompt(timeout=timeout, async_=True)
        chunks.append(repl.child.before)
        repl.child.sendline(line)

    # Command fully submitted; wait for the next prompt.
    prompt_idx = await repl._expect_prompt(timeout=timeout, async_=True)
    if prompt_idx == 1:
        # Continuation prompt means the input was incomplete: interrupt it.
        repl.child.kill(signal.SIGINT)
        await repl._expect_prompt(timeout=1, async_=True)
        raise ValueError("Continuation prompt found - input was incomplete:")
    return "".join(chunks + [repl.child.before])


class PatternWaiter(asyncio.Protocol):
    """asyncio protocol that resolves a future when an expecter matches."""

    transport = None

    def set_expecter(self, expecter):
        self.expecter = expecter
        self.fut = asyncio.Future()

    def found(self, result):
        if not self.fut.done():
            self.fut.set_result(result)
            self.transport.pause_reading()

    def error(self, exc):
        if not self.fut.done():
            self.fut.set_exception(exc)
            self.transport.pause_reading()

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        spawn = self.expecter.spawn
        text = spawn._decoder.decode(data)
        spawn._log(text, "read")

        if self.fut.done():
            # Match already delivered; just accumulate the extra output.
            spawn._before.write(text)
            spawn._buffer.write(text)
            return

        try:
            index = self.expecter.new_data(text)
            if index is not None:
                # Found a match
                self.found(index)
        except Exception as exc:
            self.expecter.errored()
            self.error(exc)

    def eof_received(self):
        # N.B. If this gets called, async will close the pipe (the spawn
        # object) for us.
        try:
            self.expecter.spawn.flag_eof = True
            index = self.expecter.eof()
        except EOF as exc:
            self.error(exc)
        else:
            self.found(index)

    def connection_lost(self, exc):
        if isinstance(exc, OSError) and exc.errno == errno.EIO:
            # We may get here without eof_received being called, e.g on Linux
            self.eof_received()
        elif exc is not None:
            self.error(exc)
evalkit_internvl/lib/python3.10/site-packages/pexpect/bashrc.sh ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Source whichever system-wide and user bashrc files exist. Different
# platforms use different names for the system-wide file, so try each in
# order (same order as before: /etc/bashrc, /etc/bash.bashrc, ~/.bashrc).
for rc_file in /etc/bashrc /etc/bash.bashrc ~/.bashrc; do
    if [[ -f $rc_file ]]; then
        source "$rc_file"
    fi
done

# Reset PS1 so pexpect can find it
PS1="$"

# Unset PROMPT_COMMAND, so that it can't change PS1 to something unexpected.
unset PROMPT_COMMAND

# Bracketed paste would wrap pasted input in escape sequences and confuse
# pattern matching, so turn it off.
bind 'set enable-bracketed-paste off'
evalkit_internvl/lib/python3.10/site-packages/pexpect/exceptions.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Exception classes used by Pexpect"""

import sys
import traceback


class ExceptionPexpect(Exception):
    '''Base class for all exceptions raised by this module.
    '''

    def __init__(self, value):
        super(ExceptionPexpect, self).__init__(value)
        self.value = value

    def __str__(self):
        return str(self.value)

    def get_trace(self):
        '''This returns an abbreviated stack trace with lines that only concern
        the caller. In other words, the stack trace inside the Pexpect module
        is not included. '''
        frames = traceback.extract_tb(sys.exc_info()[2])
        # Drop frames that originate inside pexpect itself; item[0] is the
        # frame's filename.
        frames = [item for item in frames
                  if ('pexpect/__init__' not in item[0])
                  and ('pexpect/expect' not in item[0])]
        return ''.join(traceback.format_list(frames))


class EOF(ExceptionPexpect):
    '''Raised when EOF is read from a child.
    This usually means the child has exited.'''


class TIMEOUT(ExceptionPexpect):
    '''Raised when a read time exceeds the timeout. '''
evalkit_internvl/lib/python3.10/site-packages/pexpect/expect.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+
3
+ from .exceptions import EOF, TIMEOUT
4
+
5
class Expecter(object):
    """Drives a single expect() call: feeds data to a searcher object and
    maintains the spawn object's before/after/match bookkeeping."""

    def __init__(self, spawn, searcher, searchwindowsize=-1):
        self.spawn = spawn
        self.searcher = searcher
        # -1 means "inherit from the spawn object", which should hold either
        # None or a positive number.
        if searchwindowsize == -1:
            searchwindowsize = spawn.searchwindowsize
        self.searchwindowsize = searchwindowsize
        # Plain-string searchers expose their longest needle; remembering it
        # lets us trim the buffer while still catching matches that straddle
        # a read boundary.
        self.lookback = getattr(searcher, 'longest_string', None)

    def do_search(self, window, freshlen):
        child = self.spawn
        searcher = self.searcher
        freshlen = min(freshlen, len(window))
        index = searcher.search(window, freshlen, self.searchwindowsize)
        if index >= 0:
            # Matched: everything after the match becomes the new buffer,
            # everything before it becomes `before`.
            child._buffer = child.buffer_type()
            child._buffer.write(window[searcher.end:])
            child.before = child._before.getvalue()[
                0:-(len(window) - searcher.start)]
            child._before = child.buffer_type()
            child._before.write(window[searcher.end:])
            child.after = window[searcher.start:searcher.end]
            child.match = searcher.match
            child.match_index = index
            # Found a match
            return index
        # No match: keep only as much buffer tail as a future search needs.
        maintain = self.searchwindowsize or self.lookback
        if maintain and child._buffer.tell() > maintain:
            child._buffer = child.buffer_type()
            child._buffer.write(window[-maintain:])

    def existing_data(self):
        # First call from a new call to expect_loop or expect_async.
        # self.searchwindowsize may have changed since the buffers were
        # filled, so treat all accumulated data as fresh.
        child = self.spawn
        before_len = child._before.tell()
        buf_len = child._buffer.tell()
        freshlen = before_len
        if before_len > buf_len:
            if not self.searchwindowsize:
                child._buffer = child.buffer_type()
                window = child._before.getvalue()
                child._buffer.write(window)
            elif buf_len < self.searchwindowsize:
                child._buffer = child.buffer_type()
                child._before.seek(
                    max(0, before_len - self.searchwindowsize))
                window = child._before.read()
                child._buffer.write(window)
            else:
                child._buffer.seek(max(0, buf_len - self.searchwindowsize))
                window = child._buffer.read()
        else:
            if self.searchwindowsize:
                child._buffer.seek(max(0, buf_len - self.searchwindowsize))
                window = child._buffer.read()
            else:
                window = child._buffer.getvalue()
        return self.do_search(window, freshlen)

    def new_data(self, data):
        # A subsequent call, after a call to existing_data.
        child = self.spawn
        freshlen = len(data)
        child._before.write(data)
        if not self.searchwindowsize:
            if self.lookback:
                # Search only the lookback tail plus the new data.
                old_len = child._buffer.tell()
                child._buffer.write(data)
                child._buffer.seek(max(0, old_len - self.lookback))
                window = child._buffer.read()
            else:
                # No window limit: copy the whole buffer (really slow for
                # large datasets).
                child._buffer.write(data)
                window = child.buffer
        else:
            if len(data) >= self.searchwindowsize or not child._buffer.tell():
                window = data[-self.searchwindowsize:]
                child._buffer = child.buffer_type()
                child._buffer.write(window[-self.searchwindowsize:])
            else:
                child._buffer.write(data)
                new_len = child._buffer.tell()
                child._buffer.seek(max(0, new_len - self.searchwindowsize))
                window = child._buffer.read()
        return self.do_search(window, freshlen)

    def eof(self, err=None):
        child = self.spawn

        child.before = child._before.getvalue()
        child._buffer = child.buffer_type()
        child._before = child.buffer_type()
        child.after = EOF
        index = self.searcher.eof_index
        if index >= 0:
            child.match = EOF
            child.match_index = index
            return index
        child.match = None
        child.match_index = None
        msg = str(child)
        msg += '\nsearcher: %s' % self.searcher
        if err is not None:
            msg = str(err) + '\n' + msg

        exc = EOF(msg)
        exc.__cause__ = None    # in Python 3.x we can use "raise exc from None"
        raise exc

    def timeout(self, err=None):
        child = self.spawn

        child.before = child._before.getvalue()
        child.after = TIMEOUT
        index = self.searcher.timeout_index
        if index >= 0:
            child.match = TIMEOUT
            child.match_index = index
            return index
        child.match = None
        child.match_index = None
        msg = str(child)
        msg += '\nsearcher: %s' % self.searcher
        if err is not None:
            msg = str(err) + '\n' + msg

        exc = TIMEOUT(msg)
        exc.__cause__ = None    # in Python 3.x we can use "raise exc from None"
        raise exc

    def errored(self):
        # An unexpected exception escaped: publish what we had and clear the
        # match state so the caller sees a consistent spawn object.
        child = self.spawn
        child.before = child._before.getvalue()
        child.after = None
        child.match = None
        child.match_index = None

    def expect_loop(self, timeout=-1):
        """Blocking expect"""
        child = self.spawn

        if timeout is not None:
            end_time = time.time() + timeout

        try:
            idx = self.existing_data()
            if idx is not None:
                return idx
            while True:
                # No match at this point
                if (timeout is not None) and (timeout < 0):
                    return self.timeout()
                # Still have time left, so read more data
                incoming = child.read_nonblocking(child.maxread, timeout)
                if self.spawn.delayafterread is not None:
                    time.sleep(self.spawn.delayafterread)
                idx = self.new_data(incoming)
                # Keep reading until exception or return.
                if idx is not None:
                    return idx
                if timeout is not None:
                    timeout = end_time - time.time()
        except EOF as e:
            return self.eof(e)
        except TIMEOUT as e:
            return self.timeout(e)
        except:
            self.errored()
            raise
+
186
+
187
class searcher_string(object):
    '''This is a plain string search helper for the spawn.expect_any() method.
    This helper class is for speed. For more powerful regex patterns
    see the helper class, searcher_re.

    Attributes:

        eof_index     - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1

    After a successful match by the search() method the following attributes
    are available:

        start - index into the buffer, first byte of match
        end   - index into the buffer, first byte after match
        match - the matching string itself

    '''

    def __init__(self, strings):
        '''This creates an instance of searcher_string. This argument 'strings'
        may be a list; a sequence of strings; or the EOF or TIMEOUT types. '''
        self.eof_index = -1
        self.timeout_index = -1
        self._strings = []
        self.longest_string = 0
        for idx, needle in enumerate(strings):
            if needle is EOF:
                self.eof_index = idx
            elif needle is TIMEOUT:
                self.timeout_index = idx
            else:
                self._strings.append((idx, needle))
                if len(needle) > self.longest_string:
                    self.longest_string = len(needle)

    def __str__(self):
        '''This returns a human-readable string that represents the state of
        the object.'''
        # Build (sort-key, text) pairs so the entries come out in pattern
        # order with the header line first.
        entries = [(pair[0], '    %d: %r' % pair) for pair in self._strings]
        entries.append((-1, 'searcher_string:'))
        if self.eof_index >= 0:
            entries.append((self.eof_index, '    %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            entries.append((self.timeout_index,
                            '    %d: TIMEOUT' % self.timeout_index))
        entries.sort()
        return '\n'.join(list(zip(*entries))[1])

    def search(self, buffer, freshlen, searchwindowsize=None):
        '''This searches 'buffer' for the first occurrence of one of the search
        strings.  'freshlen' must indicate the number of bytes at the end of
        'buffer' which have not been searched before. It helps to avoid
        searching the same, possibly big, buffer over and over again.

        See class spawn for the 'searchwindowsize' argument.

        If there is a match this returns the index of that string, and sets
        'start', 'end' and 'match'. Otherwise, this returns -1. '''
        first_match = None
        best_index = None
        best_match = None

        # 'freshlen' helps a lot here. Further optimizations could
        # possibly include:
        #
        # using something like the Boyer-Moore Fast String Searching
        # Algorithm; pre-compiling the search through a list of
        # strings into something that can scan the input once to
        # search for all N strings; realize that if we search for
        # ['bar', 'baz'] and the input is '...foo' we need not bother
        # rescanning until we've read three more bytes.
        #
        # Sadly, I don't know enough about this interesting topic. /grahn

        for index, needle in self._strings:
            if searchwindowsize is None:
                # the match, if any, can only be in the fresh data,
                # or at the very end of the old data
                offset = -(freshlen + len(needle))
            else:
                # better obey searchwindowsize
                offset = -searchwindowsize
            n = buffer.find(needle, offset)
            if n >= 0 and (first_match is None or n < first_match):
                first_match = n
                best_index, best_match = index, needle
        if first_match is None:
            return -1
        self.match = best_match
        self.start = first_match
        self.end = self.start + len(best_match)
        return best_index
+
284
+
285
class searcher_re(object):
    '''This is regular expression string search helper for the
    spawn.expect_any() method. This helper class is for powerful
    pattern matching. For speed, see the helper class, searcher_string.

    Attributes:

        eof_index     - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1

    After a successful match by the search() method the following attributes
    are available:

        start - index into the buffer, first byte of match
        end   - index into the buffer, first byte after match
        match - the re.match object returned by a successful re.search

    '''

    def __init__(self, patterns):
        '''This creates an instance that searches for 'patterns' Where
        'patterns' may be a list or other sequence of compiled regular
        expressions, or the EOF or TIMEOUT types.'''
        self.eof_index = -1
        self.timeout_index = -1
        self._searches = []
        for idx, pattern in enumerate(patterns):
            if pattern is EOF:
                self.eof_index = idx
            elif pattern is TIMEOUT:
                self.timeout_index = idx
            else:
                self._searches.append((idx, pattern))

    def __str__(self):
        '''This returns a human-readable string that represents the state of
        the object.'''
        # Build (sort-key, text) pairs so the entries come out in pattern
        # order with the header line first.
        entries = []
        for idx, pattern in self._searches:
            entries.append((idx, '    %d: re.compile(%r)' % (idx, pattern.pattern)))
        entries.append((-1, 'searcher_re:'))
        if self.eof_index >= 0:
            entries.append((self.eof_index, '    %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            entries.append((self.timeout_index, '    %d: TIMEOUT' %
                            self.timeout_index))
        entries.sort()
        return '\n'.join(list(zip(*entries))[1])

    def search(self, buffer, freshlen, searchwindowsize=None):
        '''This searches 'buffer' for the first occurrence of one of the regular
        expressions. 'freshlen' must indicate the number of bytes at the end of
        'buffer' which have not been searched before.

        See class spawn for the 'searchwindowsize' argument.

        If there is a match this returns the index of that string, and sets
        'start', 'end' and 'match'. Otherwise, returns -1.'''
        first_match = None
        the_match = None
        best_index = None
        # 'freshlen' doesn't help here -- we cannot predict the
        # length of a match, and the re module provides no help.
        if searchwindowsize is None:
            searchstart = 0
        else:
            searchstart = max(0, len(buffer) - searchwindowsize)
        for index, pattern in self._searches:
            match = pattern.search(buffer, searchstart)
            if match is None:
                continue
            n = match.start()
            if first_match is None or n < first_match:
                first_match = n
                the_match = match
                best_index = index
        if first_match is None:
            return -1
        self.start = first_match
        self.match = the_match
        self.end = self.match.end()
        return best_index
evalkit_internvl/lib/python3.10/site-packages/pexpect/fdpexpect.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''This is like :mod:`pexpect`, but it will work with any file descriptor that you
2
+ pass it. You are responsible for opening and close the file descriptor.
3
+ This allows you to use Pexpect with sockets and named pipes (FIFOs).
4
+
5
+ .. note::
6
+ socket.fileno() does not give a readable file descriptor on windows.
7
+ Use :mod:`pexpect.socket_pexpect` for cross-platform socket support
8
+
9
+ PEXPECT LICENSE
10
+
11
+ This license is approved by the OSI and FSF as GPL-compatible.
12
+ http://opensource.org/licenses/isc-license.txt
13
+
14
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
15
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
16
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
17
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
18
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25
+
26
+ '''
27
+
28
+ from .spawnbase import SpawnBase
29
+ from .exceptions import ExceptionPexpect, TIMEOUT
30
+ from .utils import select_ignore_interrupts, poll_ignore_interrupts
31
+ import os
32
+
33
+ __all__ = ['fdspawn']
34
+
35
class fdspawn(SpawnBase):
    '''This is like pexpect.spawn but allows you to supply your own open file
    descriptor. For example, you could use it to read through a file looking
    for patterns, or to control a modem or serial device. '''

    def __init__(self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, encoding=None, codec_errors='strict', use_poll=False):
        '''This takes a file descriptor (an int) or an object that supports the
        fileno() method (returning an int). All Python file-like objects
        support fileno().

        :raises ExceptionPexpect: if *fd* is neither an int nor an object
            with a ``fileno()`` method, or does not refer to an open
            file descriptor.
        '''
        # Accept file-like objects by unwrapping them to their descriptor.
        # isinstance() replaces the old ``type(fd) != type(0)`` check.
        if not isinstance(fd, int) and hasattr(fd, 'fileno'):
            fd = fd.fileno()

        if not isinstance(fd, int):
            raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')

        try:  # make sure fd is a valid file descriptor
            os.fstat(fd)
        except OSError:
            raise ExceptionPexpect('The fd argument is not a valid file descriptor.')

        # There is no child command: this spawn wraps an existing descriptor.
        self.args = None
        self.command = None
        SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile,
                           encoding=encoding, codec_errors=codec_errors)
        self.child_fd = fd
        self.own_fd = False
        self.closed = False
        self.name = '<file descriptor %d>' % fd
        self.use_poll = use_poll

    def close(self):
        """Close the file descriptor.

        Calling this method a second time does nothing, but if the file
        descriptor was closed elsewhere, :class:`OSError` will be raised.
        """
        if self.child_fd == -1:
            return

        self.flush()
        os.close(self.child_fd)
        self.child_fd = -1
        self.closed = True

    def isalive(self):
        '''This checks if the file descriptor is still valid. If :func:`os.fstat`
        does not raise an exception then we assume it is alive. '''

        if self.child_fd == -1:
            return False
        try:
            os.fstat(self.child_fd)
            return True
        except OSError:
            # Narrowed from a bare ``except``: fstat signals a dead or
            # invalid fd with OSError; anything else is a real bug and
            # should propagate.
            return False

    def terminate(self, force=False):  # pragma: no cover
        '''Deprecated and invalid. Just raises an exception.'''
        raise ExceptionPexpect('This method is not valid for file descriptors.')

    # These four methods are left around for backwards compatibility, but not
    # documented as part of fdpexpect. You're encouraged to use os.write
    # directly.
    def send(self, s):
        "Write to fd, return number of bytes written"
        s = self._coerce_send_string(s)
        self._log(s, 'send')

        b = self._encoder.encode(s, final=False)
        return os.write(self.child_fd, b)

    def sendline(self, s):
        "Write to fd with trailing newline, return number of bytes written"
        s = self._coerce_send_string(s)
        return self.send(s + self.linesep)

    def write(self, s):
        "Write to fd, return None"
        self.send(s)

    def writelines(self, sequence):
        "Call self.write() for each item in sequence"
        for s in sequence:
            self.write(s)

    def read_nonblocking(self, size=1, timeout=-1):
        """
        Read from the file descriptor and return the result as a string.

        The read_nonblocking method of :class:`SpawnBase` assumes that a call
        to os.read will not block (timeout parameter is ignored). This is not
        the case for POSIX file-like objects such as sockets and serial ports.

        Use :func:`select.select`, timeout is implemented conditionally for
        POSIX systems.

        :param int size: Read at most *size* bytes.
        :param int timeout: Wait timeout seconds for file descriptor to be
            ready to read. When -1 (default), use self.timeout. When 0, poll.
        :return: String containing the bytes read
        """
        if os.name == 'posix':
            if timeout == -1:
                timeout = self.timeout
            rlist = [self.child_fd]
            wlist = []
            xlist = []
            if self.use_poll:
                rlist = poll_ignore_interrupts(rlist, timeout)
            else:
                rlist, wlist, xlist = select_ignore_interrupts(
                    rlist, wlist, xlist, timeout
                )
            if self.child_fd not in rlist:
                raise TIMEOUT('Timeout exceeded.')
        return super(fdspawn, self).read_nonblocking(size)
evalkit_internvl/lib/python3.10/site-packages/pexpect/popen_spawn.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provides an interface like pexpect.spawn interface using subprocess.Popen
2
+ """
3
+ import os
4
+ import threading
5
+ import subprocess
6
+ import sys
7
+ import time
8
+ import signal
9
+ import shlex
10
+
11
+ try:
12
+ from queue import Queue, Empty # Python 3
13
+ except ImportError:
14
+ from Queue import Queue, Empty # Python 2
15
+
16
+ from .spawnbase import SpawnBase, PY3
17
+ from .exceptions import EOF
18
+ from .utils import string_types
19
+
20
class PopenSpawn(SpawnBase):
    """Drive a child process through ``subprocess.Popen`` pipes.

    Provides the pexpect send/expect interface without a pty: the
    child's stdout and stderr are merged into one pipe, which a daemon
    thread drains into a queue consumed by :meth:`read_nonblocking`.
    """

    # Set once the reader thread has delivered its EOF sentinel (None).
    _read_reached_eof = False

    def __init__(self, cmd, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, cwd=None, env=None, encoding=None,
                 codec_errors='strict', preexec_fn=None):
        super(PopenSpawn, self).__init__(
            timeout=timeout, maxread=maxread,
            searchwindowsize=searchwindowsize, logfile=logfile,
            encoding=encoding, codec_errors=codec_errors)

        # `SpawnBase` initializes `self.crlf` to `\r\n` because a PTY
        # converts incoming LF to CRLF (the `onlcr` flag, see
        # https://stackoverflow.com/a/35887657/5397009).  Popen performs
        # no such translation, so the child emits plain `os.linesep`.
        if encoding is None:
            self.crlf = os.linesep.encode("ascii")
        else:
            self.crlf = self.string_type(os.linesep)

        popen_kwargs = dict(bufsize=0, stdin=subprocess.PIPE,
                            stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
                            cwd=cwd, preexec_fn=preexec_fn, env=env)

        if sys.platform == 'win32':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            popen_kwargs['startupinfo'] = startupinfo
            popen_kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

        # A command string is split into an argv list everywhere but
        # Windows, where Popen does its own quoting.
        if isinstance(cmd, string_types) and sys.platform != 'win32':
            cmd = shlex.split(cmd, posix=os.name == 'posix')

        self.proc = subprocess.Popen(cmd, **popen_kwargs)
        self.pid = self.proc.pid
        self.closed = False
        self._buf = self.string_type()

        self._read_queue = Queue()
        self._read_thread = threading.Thread(target=self._read_incoming)
        self._read_thread.daemon = True
        self._read_thread.start()

    def read_nonblocking(self, size, timeout):
        """Return up to *size* units of buffered child output.

        Drains the reader thread's queue until *size* is satisfied, the
        queue runs dry, or *timeout* seconds elapse.  Once the child's
        output is exhausted and the buffer is empty, raises EOF.
        """
        pending = self._buf
        if self._read_reached_eof:
            # Reading is finished; hand out what is buffered, then EOF.
            if pending:
                self._buf = pending[size:]
                return pending[:size]
            else:
                self.flag_eof = True
                raise EOF('End Of File (EOF).')

        if timeout == -1:
            timeout = self.timeout
        elif timeout is None:
            timeout = 1e6  # effectively "forever"

        started = time.time()
        while (time.time() - started) < timeout and size and len(pending) < size:
            try:
                incoming = self._read_queue.get_nowait()
            except Empty:
                break
            else:
                if incoming is None:
                    # Sentinel from _read_incoming: the pipe hit EOF.
                    self._read_reached_eof = True
                    break

                pending += self._decoder.decode(incoming, final=False)

        result, self._buf = pending[:size], pending[size:]

        self._log(result, 'read')
        return result

    def _read_incoming(self):
        """Run in a thread to move output from a pipe to a queue."""
        fileno = self.proc.stdout.fileno()
        while 1:
            data = b''
            try:
                data = os.read(fileno, 1024)
            except OSError as exc:
                self._log(exc, 'read')

            if not data:
                # An empty (or failed) read means EOF: signal and stop.
                self._read_queue.put(None)
                return

            self._read_queue.put(data)

    def write(self, s):
        '''This is similar to send() except that there is no return value.
        '''
        self.send(s)

    def writelines(self, sequence):
        '''Send each element of *sequence* to the child.

        The sequence can be any iterable producing strings, typically a
        list of strings.  No line separators are added and there is no
        return value.
        '''
        for item in sequence:
            self.send(item)

    def send(self, s):
        '''Send data to the subprocess' stdin.

        Returns the number of bytes written.
        '''
        s = self._coerce_send_string(s)
        self._log(s, 'send')

        encoded = self._encoder.encode(s, final=False)
        if PY3:
            return self.proc.stdin.write(encoded)
        # On Python 2, .write() returns None, so report the length of the
        # encoded data ourselves, assuming it was written in full.
        self.proc.stdin.write(encoded)
        return len(encoded)

    def sendline(self, s=''):
        '''Wraps send(), sending string ``s`` to child process, with os.linesep
        automatically appended. Returns number of bytes written. '''

        written = self.send(s)
        return written + self.send(self.linesep)

    def wait(self):
        '''Wait for the subprocess to finish.

        Returns the exit code.
        '''
        status = self.proc.wait()
        if status >= 0:
            self.exitstatus = status
            self.signalstatus = None
        else:
            # A negative Popen status means the child died from a signal.
            self.exitstatus = None
            self.signalstatus = -status
        self.terminated = True
        return status

    def kill(self, sig):
        '''Sends a Unix signal to the subprocess.

        Use constants from the :mod:`signal` module to specify which
        signal.  On Windows, only CTRL_C_EVENT and CTRL_BREAK_EVENT can
        be delivered; any other signal is mapped to SIGTERM.
        '''
        if sys.platform == 'win32':
            if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
                sig = signal.CTRL_C_EVENT
            elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
                sig = signal.CTRL_BREAK_EVENT
            else:
                sig = signal.SIGTERM

        os.kill(self.proc.pid, sig)

    def sendeof(self):
        '''Closes the stdin pipe from the writing end.'''
        self.proc.stdin.close()
evalkit_internvl/lib/python3.10/site-packages/pexpect/pty_spawn.py ADDED
@@ -0,0 +1,860 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import time
4
+ import pty
5
+ import tty
6
+ import errno
7
+ import signal
8
+ from contextlib import contextmanager
9
+
10
+ import ptyprocess
11
+ from ptyprocess.ptyprocess import use_native_pty_fork
12
+
13
+ from .exceptions import ExceptionPexpect, EOF, TIMEOUT
14
+ from .spawnbase import SpawnBase
15
+ from .utils import (
16
+ which, split_command_line, select_ignore_interrupts, poll_ignore_interrupts
17
+ )
18
+
19
@contextmanager
def _wrap_ptyprocess_err():
    """Re-raise any PtyProcessError from the wrapped block as ExceptionPexpect."""
    try:
        yield
    except ptyprocess.PtyProcessError as err:
        raise ExceptionPexpect(*err.args)
26
+
27
+ PY3 = (sys.version_info[0] >= 3)
28
+
29
+ class spawn(SpawnBase):
30
+ '''This is the main class interface for Pexpect. Use this class to start
31
+ and control child applications. '''
32
+
33
+ # This is purely informational now - changing it has no effect
34
+ use_native_pty_fork = use_native_pty_fork
35
+
36
+ def __init__(self, command, args=[], timeout=30, maxread=2000,
37
+ searchwindowsize=None, logfile=None, cwd=None, env=None,
38
+ ignore_sighup=False, echo=True, preexec_fn=None,
39
+ encoding=None, codec_errors='strict', dimensions=None,
40
+ use_poll=False):
41
+ '''This is the constructor. The command parameter may be a string that
42
+ includes a command and any arguments to the command. For example::
43
+
44
+ child = pexpect.spawn('/usr/bin/ftp')
45
+ child = pexpect.spawn('/usr/bin/ssh user@example.com')
46
+ child = pexpect.spawn('ls -latr /tmp')
47
+
48
+ You may also construct it with a list of arguments like so::
49
+
50
+ child = pexpect.spawn('/usr/bin/ftp', [])
51
+ child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
52
+ child = pexpect.spawn('ls', ['-latr', '/tmp'])
53
+
54
+ After this the child application will be created and will be ready to
55
+ talk to. For normal use, see expect() and send() and sendline().
56
+
57
+ Remember that Pexpect does NOT interpret shell meta characters such as
58
+ redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
59
+ common mistake. If you want to run a command and pipe it through
60
+ another command then you must also start a shell. For example::
61
+
62
+ child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
63
+ child.expect(pexpect.EOF)
64
+
65
+ The second form of spawn (where you pass a list of arguments) is useful
66
+ in situations where you wish to spawn a command and pass it its own
67
+ argument list. This can make syntax more clear. For example, the
68
+ following is equivalent to the previous example::
69
+
70
+ shell_cmd = 'ls -l | grep LOG > logs.txt'
71
+ child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
72
+ child.expect(pexpect.EOF)
73
+
74
+ The maxread attribute sets the read buffer size. This is maximum number
75
+ of bytes that Pexpect will try to read from a TTY at one time. Setting
76
+ the maxread size to 1 will turn off buffering. Setting the maxread
77
+ value higher may help performance in cases where large amounts of
78
+ output are read back from the child. This feature is useful in
79
+ conjunction with searchwindowsize.
80
+
81
+ When the keyword argument *searchwindowsize* is None (default), the
82
+ full buffer is searched at each iteration of receiving incoming data.
83
+ The default number of bytes scanned at each iteration is very large
84
+ and may be reduced to collaterally reduce search cost. After
85
+ :meth:`~.expect` returns, the full buffer attribute remains up to
86
+ size *maxread* irrespective of *searchwindowsize* value.
87
+
88
+ When the keyword argument ``timeout`` is specified as a number,
89
+ (default: *30*), then :class:`TIMEOUT` will be raised after the value
90
+ specified has elapsed, in seconds, for any of the :meth:`~.expect`
91
+ family of method calls. When None, TIMEOUT will not be raised, and
92
+ :meth:`~.expect` may block indefinitely until match.
93
+
94
+
95
+ The logfile member turns on or off logging. All input and output will
96
+ be copied to the given file object. Set logfile to None to stop
97
+ logging. This is the default. Set logfile to sys.stdout to echo
98
+ everything to standard output. The logfile is flushed after each write.
99
+
100
+ Example log input and output to a file::
101
+
102
+ child = pexpect.spawn('some_command')
103
+ fout = open('mylog.txt','wb')
104
+ child.logfile = fout
105
+
106
+ Example log to stdout::
107
+
108
+ # In Python 2:
109
+ child = pexpect.spawn('some_command')
110
+ child.logfile = sys.stdout
111
+
112
+ # In Python 3, we'll use the ``encoding`` argument to decode data
113
+ # from the subprocess and handle it as unicode:
114
+ child = pexpect.spawn('some_command', encoding='utf-8')
115
+ child.logfile = sys.stdout
116
+
117
+ The logfile_read and logfile_send members can be used to separately log
118
+ the input from the child and output sent to the child. Sometimes you
119
+ don't want to see everything you write to the child. You only want to
120
+ log what the child sends back. For example::
121
+
122
+ child = pexpect.spawn('some_command')
123
+ child.logfile_read = sys.stdout
124
+
125
+ You will need to pass an encoding to spawn in the above code if you are
126
+ using Python 3.
127
+
128
+ To separately log output sent to the child use logfile_send::
129
+
130
+ child.logfile_send = fout
131
+
132
+ If ``ignore_sighup`` is True, the child process will ignore SIGHUP
133
+ signals. The default is False from Pexpect 4.0, meaning that SIGHUP
134
+ will be handled normally by the child.
135
+
136
+ The delaybeforesend helps overcome a weird behavior that many users
137
+ were experiencing. The typical problem was that a user would expect() a
138
+ "Password:" prompt and then immediately call sendline() to send the
139
+ password. The user would then see that their password was echoed back
140
+ to them. Passwords don't normally echo. The problem is caused by the
141
+ fact that most applications print out the "Password" prompt and then
142
+ turn off stdin echo, but if you send your password before the
143
+ application turned off echo, then you get your password echoed.
144
+ Normally this wouldn't be a problem when interacting with a human at a
145
+ real keyboard. If you introduce a slight delay just before writing then
146
+ this seems to clear up the problem. This was such a common problem for
147
+ many users that I decided that the default pexpect behavior should be
148
+ to sleep just before writing to the child application. 1/20th of a
149
+ second (50 ms) seems to be enough to clear up the problem. You can set
150
+ delaybeforesend to None to return to the old behavior.
151
+
152
+ Note that spawn is clever about finding commands on your path.
153
+ It uses the same logic that "which" uses to find executables.
154
+
155
+ If you wish to get the exit status of the child you must call the
156
+ close() method. The exit or signal status of the child will be stored
157
+ in self.exitstatus or self.signalstatus. If the child exited normally
158
+ then exitstatus will store the exit return code and signalstatus will
159
+ be None. If the child was terminated abnormally with a signal then
160
+ signalstatus will store the signal value and exitstatus will be None::
161
+
162
+ child = pexpect.spawn('some_command')
163
+ child.close()
164
+ print(child.exitstatus, child.signalstatus)
165
+
166
+ If you need more detail you can also read the self.status member which
167
+ stores the status returned by os.waitpid. You can interpret this using
168
+ os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG.
169
+
170
+ The echo attribute may be set to False to disable echoing of input.
171
+ As a pseudo-terminal, all input echoed by the "keyboard" (send()
172
+ or sendline()) will be repeated to output. For many cases, it is
173
+ not desirable to have echo enabled, and it may be later disabled
174
+ using setecho(False) followed by waitnoecho(). However, for some
175
+ platforms such as Solaris, this is not possible, and should be
176
+ disabled immediately on spawn.
177
+
178
+ If preexec_fn is given, it will be called in the child process before
179
+ launching the given command. This is useful to e.g. reset inherited
180
+ signal handlers.
181
+
182
+ The dimensions attribute specifies the size of the pseudo-terminal as
183
+ seen by the subprocess, and is specified as a two-entry tuple (rows,
184
+ columns). If this is unspecified, the defaults in ptyprocess will apply.
185
+
186
+ The use_poll attribute enables using select.poll() over select.select()
187
+ for socket handling. This is handy if your system could have > 1024 fds
188
+ '''
189
+ super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize,
190
+ logfile=logfile, encoding=encoding, codec_errors=codec_errors)
191
+ self.STDIN_FILENO = pty.STDIN_FILENO
192
+ self.STDOUT_FILENO = pty.STDOUT_FILENO
193
+ self.STDERR_FILENO = pty.STDERR_FILENO
194
+ self.str_last_chars = 100
195
+ self.cwd = cwd
196
+ self.env = env
197
+ self.echo = echo
198
+ self.ignore_sighup = ignore_sighup
199
+ self.__irix_hack = sys.platform.lower().startswith('irix')
200
+ if command is None:
201
+ self.command = None
202
+ self.args = None
203
+ self.name = '<pexpect factory incomplete>'
204
+ else:
205
+ self._spawn(command, args, preexec_fn, dimensions)
206
+ self.use_poll = use_poll
207
+
208
+ def __str__(self):
209
+ '''This returns a human-readable string that represents the state of
210
+ the object. '''
211
+
212
+ s = []
213
+ s.append(repr(self))
214
+ s.append('command: ' + str(self.command))
215
+ s.append('args: %r' % (self.args,))
216
+ s.append('buffer (last %s chars): %r' % (self.str_last_chars,self.buffer[-self.str_last_chars:]))
217
+ s.append('before (last %s chars): %r' % (self.str_last_chars,self.before[-self.str_last_chars:] if self.before else ''))
218
+ s.append('after: %r' % (self.after,))
219
+ s.append('match: %r' % (self.match,))
220
+ s.append('match_index: ' + str(self.match_index))
221
+ s.append('exitstatus: ' + str(self.exitstatus))
222
+ if hasattr(self, 'ptyproc'):
223
+ s.append('flag_eof: ' + str(self.flag_eof))
224
+ s.append('pid: ' + str(self.pid))
225
+ s.append('child_fd: ' + str(self.child_fd))
226
+ s.append('closed: ' + str(self.closed))
227
+ s.append('timeout: ' + str(self.timeout))
228
+ s.append('delimiter: ' + str(self.delimiter))
229
+ s.append('logfile: ' + str(self.logfile))
230
+ s.append('logfile_read: ' + str(self.logfile_read))
231
+ s.append('logfile_send: ' + str(self.logfile_send))
232
+ s.append('maxread: ' + str(self.maxread))
233
+ s.append('ignorecase: ' + str(self.ignorecase))
234
+ s.append('searchwindowsize: ' + str(self.searchwindowsize))
235
+ s.append('delaybeforesend: ' + str(self.delaybeforesend))
236
+ s.append('delayafterclose: ' + str(self.delayafterclose))
237
+ s.append('delayafterterminate: ' + str(self.delayafterterminate))
238
+ return '\n'.join(s)
239
+
240
+ def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
241
+ '''This starts the given command in a child process. This does all the
242
+ fork/exec type of stuff for a pty. This is called by __init__. If args
243
+ is empty then command will be parsed (split on spaces) and args will be
244
+ set to parsed arguments. '''
245
+
246
+ # The pid and child_fd of this object get set by this method.
247
+ # Note that it is difficult for this method to fail.
248
+ # You cannot detect if the child process cannot start.
249
+ # So the only way you can tell if the child process started
250
+ # or not is to try to read from the file descriptor. If you get
251
+ # EOF immediately then it means that the child is already dead.
252
+ # That may not necessarily be bad because you may have spawned a child
253
+ # that performs some task; creates no stdout output; and then dies.
254
+
255
+ # If command is an int type then it may represent a file descriptor.
256
+ if isinstance(command, type(0)):
257
+ raise ExceptionPexpect('Command is an int type. ' +
258
+ 'If this is a file descriptor then maybe you want to ' +
259
+ 'use fdpexpect.fdspawn which takes an existing ' +
260
+ 'file descriptor instead of a command string.')
261
+
262
+ if not isinstance(args, type([])):
263
+ raise TypeError('The argument, args, must be a list.')
264
+
265
+ if args == []:
266
+ self.args = split_command_line(command)
267
+ self.command = self.args[0]
268
+ else:
269
+ # Make a shallow copy of the args list.
270
+ self.args = args[:]
271
+ self.args.insert(0, command)
272
+ self.command = command
273
+
274
+ command_with_path = which(self.command, env=self.env)
275
+ if command_with_path is None:
276
+ raise ExceptionPexpect('The command was not found or was not ' +
277
+ 'executable: %s.' % self.command)
278
+ self.command = command_with_path
279
+ self.args[0] = self.command
280
+
281
+ self.name = '<' + ' '.join(self.args) + '>'
282
+
283
+ assert self.pid is None, 'The pid member must be None.'
284
+ assert self.command is not None, 'The command member must not be None.'
285
+
286
+ kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
287
+ if self.ignore_sighup:
288
+ def preexec_wrapper():
289
+ "Set SIGHUP to be ignored, then call the real preexec_fn"
290
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
291
+ if preexec_fn is not None:
292
+ preexec_fn()
293
+ kwargs['preexec_fn'] = preexec_wrapper
294
+
295
+ if dimensions is not None:
296
+ kwargs['dimensions'] = dimensions
297
+
298
+ if self.encoding is not None:
299
+ # Encode command line using the specified encoding
300
+ self.args = [a if isinstance(a, bytes) else a.encode(self.encoding)
301
+ for a in self.args]
302
+
303
+ self.ptyproc = self._spawnpty(self.args, env=self.env,
304
+ cwd=self.cwd, **kwargs)
305
+
306
+ self.pid = self.ptyproc.pid
307
+ self.child_fd = self.ptyproc.fd
308
+
309
+
310
+ self.terminated = False
311
+ self.closed = False
312
+
313
+ def _spawnpty(self, args, **kwargs):
314
+ '''Spawn a pty and return an instance of PtyProcess.'''
315
+ return ptyprocess.PtyProcess.spawn(args, **kwargs)
316
+
317
+ def close(self, force=True):
318
+ '''This closes the connection with the child application. Note that
319
+ calling close() more than once is valid. This emulates standard Python
320
+ behavior with files. Set force to True if you want to make sure that
321
+ the child is terminated (SIGKILL is sent if the child ignores SIGHUP
322
+ and SIGINT). '''
323
+
324
+ self.flush()
325
+ with _wrap_ptyprocess_err():
326
+ # PtyProcessError may be raised if it is not possible to terminate
327
+ # the child.
328
+ self.ptyproc.close(force=force)
329
+ self.isalive() # Update exit status from ptyproc
330
+ self.child_fd = -1
331
+ self.closed = True
332
+
333
+ def isatty(self):
334
+ '''This returns True if the file descriptor is open and connected to a
335
+ tty(-like) device, else False.
336
+
337
+ On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
338
+ the child pty may not appear as a terminal device. This means
339
+ methods such as setecho(), setwinsize(), getwinsize() may raise an
340
+ IOError. '''
341
+
342
+ return os.isatty(self.child_fd)
343
+
344
+ def waitnoecho(self, timeout=-1):
345
+ '''This waits until the terminal ECHO flag is set False. This returns
346
+ True if the echo mode is off. This returns False if the ECHO flag was
347
+ not set False before the timeout. This can be used to detect when the
348
+ child is waiting for a password. Usually a child application will turn
349
+ off echo mode when it is waiting for the user to enter a password. For
350
+ example, instead of expecting the "password:" prompt you can wait for
351
+ the child to set ECHO off::
352
+
353
+ p = pexpect.spawn('ssh user@example.com')
354
+ p.waitnoecho()
355
+ p.sendline(mypassword)
356
+
357
+ If timeout==-1 then this method will use the value in self.timeout.
358
+ If timeout==None then this method to block until ECHO flag is False.
359
+ '''
360
+
361
+ if timeout == -1:
362
+ timeout = self.timeout
363
+ if timeout is not None:
364
+ end_time = time.time() + timeout
365
+ while True:
366
+ if not self.getecho():
367
+ return True
368
+ if timeout < 0 and timeout is not None:
369
+ return False
370
+ if timeout is not None:
371
+ timeout = end_time - time.time()
372
+ time.sleep(0.1)
373
+
374
+ def getecho(self):
375
+ '''This returns the terminal echo mode. This returns True if echo is
376
+ on or False if echo is off. Child applications that are expecting you
377
+ to enter a password often set ECHO False. See waitnoecho().
378
+
379
+ Not supported on platforms where ``isatty()`` returns False. '''
380
+ return self.ptyproc.getecho()
381
+
382
+ def setecho(self, state):
383
+ '''This sets the terminal echo mode on or off. Note that anything the
384
+ child sent before the echo will be lost, so you should be sure that
385
+ your input buffer is empty before you call setecho(). For example, the
386
+ following will work as expected::
387
+
388
+ p = pexpect.spawn('cat') # Echo is on by default.
389
+ p.sendline('1234') # We expect see this twice from the child...
390
+ p.expect(['1234']) # ... once from the tty echo...
391
+ p.expect(['1234']) # ... and again from cat itself.
392
+ p.setecho(False) # Turn off tty echo
393
+ p.sendline('abcd') # We will set this only once (echoed by cat).
394
+ p.sendline('wxyz') # We will set this only once (echoed by cat)
395
+ p.expect(['abcd'])
396
+ p.expect(['wxyz'])
397
+
398
+ The following WILL NOT WORK because the lines sent before the setecho
399
+ will be lost::
400
+
401
+ p = pexpect.spawn('cat')
402
+ p.sendline('1234')
403
+ p.setecho(False) # Turn off tty echo
404
+ p.sendline('abcd') # We will set this only once (echoed by cat).
405
+ p.sendline('wxyz') # We will set this only once (echoed by cat)
406
+ p.expect(['1234'])
407
+ p.expect(['1234'])
408
+ p.expect(['abcd'])
409
+ p.expect(['wxyz'])
410
+
411
+
412
+ Not supported on platforms where ``isatty()`` returns False.
413
+ '''
414
+ return self.ptyproc.setecho(state)
415
+
416
+ def read_nonblocking(self, size=1, timeout=-1):
417
+ '''This reads at most size characters from the child application. It
418
+ includes a timeout. If the read does not complete within the timeout
419
+ period then a TIMEOUT exception is raised. If the end of file is read
420
+ then an EOF exception will be raised. If a logfile is specified, a
421
+ copy is written to that log.
422
+
423
+ If timeout is None then the read may block indefinitely.
424
+ If timeout is -1 then the self.timeout value is used. If timeout is 0
425
+ then the child is polled and if there is no data immediately ready
426
+ then this will raise a TIMEOUT exception.
427
+
428
+ The timeout refers only to the amount of time to read at least one
429
+ character. This is not affected by the 'size' parameter, so if you call
430
+ read_nonblocking(size=100, timeout=30) and only one character is
431
+ available right away then one character will be returned immediately.
432
+ It will not wait for 30 seconds for another 99 characters to come in.
433
+
434
+ On the other hand, if there are bytes available to read immediately,
435
+ all those bytes will be read (up to the buffer size). So, if the
436
+ buffer size is 1 megabyte and there is 1 megabyte of data available
437
+ to read, the buffer will be filled, regardless of timeout.
438
+
439
+ This is a wrapper around os.read(). It uses select.select() or
440
+ select.poll() to implement the timeout. '''
441
+
442
+ if self.closed:
443
+ raise ValueError('I/O operation on closed file.')
444
+
445
+ if self.use_poll:
446
+ def select(timeout):
447
+ return poll_ignore_interrupts([self.child_fd], timeout)
448
+ else:
449
+ def select(timeout):
450
+ return select_ignore_interrupts([self.child_fd], [], [], timeout)[0]
451
+
452
+ # If there is data available to read right now, read as much as
453
+ # we can. We do this to increase performance if there are a lot
454
+ # of bytes to be read. This also avoids calling isalive() too
455
+ # often. See also:
456
+ # * https://github.com/pexpect/pexpect/pull/304
457
+ # * http://trac.sagemath.org/ticket/10295
458
+ if select(0):
459
+ try:
460
+ incoming = super(spawn, self).read_nonblocking(size)
461
+ except EOF:
462
+ # Maybe the child is dead: update some attributes in that case
463
+ self.isalive()
464
+ raise
465
+ while len(incoming) < size and select(0):
466
+ try:
467
+ incoming += super(spawn, self).read_nonblocking(size - len(incoming))
468
+ except EOF:
469
+ # Maybe the child is dead: update some attributes in that case
470
+ self.isalive()
471
+ # Don't raise EOF, just return what we read so far.
472
+ return incoming
473
+ return incoming
474
+
475
+ if timeout == -1:
476
+ timeout = self.timeout
477
+
478
+ if not self.isalive():
479
+ # The process is dead, but there may or may not be data
480
+ # available to read. Note that some systems such as Solaris
481
+ # do not give an EOF when the child dies. In fact, you can
482
+ # still try to read from the child_fd -- it will block
483
+ # forever or until TIMEOUT. For that reason, it's important
484
+ # to do this check before calling select() with timeout.
485
+ if select(0):
486
+ return super(spawn, self).read_nonblocking(size)
487
+ self.flag_eof = True
488
+ raise EOF('End Of File (EOF). Braindead platform.')
489
+ elif self.__irix_hack:
490
+ # Irix takes a long time before it realizes a child was terminated.
491
+ # Make sure that the timeout is at least 2 seconds.
492
+ # FIXME So does this mean Irix systems are forced to always have
493
+ # FIXME a 2 second delay when calling read_nonblocking? That sucks.
494
+ if timeout is not None and timeout < 2:
495
+ timeout = 2
496
+
497
+ # Because of the select(0) check above, we know that no data
498
+ # is available right now. But if a non-zero timeout is given
499
+ # (possibly timeout=None), we call select() with a timeout.
500
+ if (timeout != 0) and select(timeout):
501
+ return super(spawn, self).read_nonblocking(size)
502
+
503
+ if not self.isalive():
504
+ # Some platforms, such as Irix, will claim that their
505
+ # processes are alive; timeout on the select; and
506
+ # then finally admit that they are not alive.
507
+ self.flag_eof = True
508
+ raise EOF('End of File (EOF). Very slow platform.')
509
+ else:
510
+ raise TIMEOUT('Timeout exceeded.')
511
+
512
+ def write(self, s):
513
+ '''This is similar to send() except that there is no return value.
514
+ '''
515
+
516
+ self.send(s)
517
+
518
+ def writelines(self, sequence):
519
+ '''This calls write() for each element in the sequence. The sequence
520
+ can be any iterable object producing strings, typically a list of
521
+ strings. This does not add line separators. There is no return value.
522
+ '''
523
+
524
+ for s in sequence:
525
+ self.write(s)
526
+
527
def send(self, s):
    '''Sends string ``s`` to the child process, returning the number of
    bytes written. If a logfile is specified, a copy is written to that
    log.

    The default terminal input mode is canonical processing unless set
    otherwise by the child process. This allows backspace and other line
    processing to be performed prior to transmitting to the receiving
    program. As this is buffered, there is a limited size of such buffer.

    On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All
    other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024
    on OSX, 256 on OpenSolaris, and 1920 on FreeBSD.

    This value may be discovered using fpathconf(3)::

        >>> from os import fpathconf
        >>> print(fpathconf(0, 'PC_MAX_CANON'))
        256

    On such a system, only 256 bytes may be received per line. Any
    subsequent bytes received will be discarded. BEL (``'\a'``) is then
    sent to output if IMAXBEL (termios.h) is set by the tty driver.
    This is usually enabled by default. Linux does not honor this as
    an option -- it behaves as though it is always set on.

    Canonical input processing may be disabled altogether by executing
    a shell, then stty(1), before executing the final program::

        >>> bash = pexpect.spawn('/bin/bash', echo=False)
        >>> bash.sendline('stty -icanon')
        >>> bash.sendline('base64')
        >>> bash.sendline('x' * 5000)
    '''

    if self.delaybeforesend is not None:
        # Optional pacing delay for programs that drop input arriving
        # immediately after they print a prompt.
        time.sleep(self.delaybeforesend)

    # Normalize str/bytes according to this spawn's encoding mode.
    s = self._coerce_send_string(s)
    self._log(s, 'send')

    # final=False preserves the incremental encoder's internal state, so a
    # multi-byte character may safely be split across successive send()s.
    b = self._encoder.encode(s, final=False)
    return os.write(self.child_fd, b)
570
+
571
def sendline(self, s=''):
    '''Send *s* followed by ``os.linesep`` via :meth:`send`.

    Returns the number of bytes written. The terminal-mode line length
    limits described in the docstring of :meth:`send` apply here too.
    '''
    payload = self._coerce_send_string(s)
    return self.send(payload + self.linesep)
579
+
580
def _log_control(self, s):
    """Record control bytes in the logfiles, decoding first in text mode."""
    # In text mode (encoding set) the log expects str, so decode the raw
    # bytes leniently; in bytes mode pass them through untouched.
    text = s if self.encoding is None else s.decode(self.encoding, 'replace')
    self._log(text, 'send')
585
+
586
def sendcontrol(self, char):
    '''Send a control character to the child, by mnemonic letter.

    For example ``child.sendcontrol('c')`` sends Ctrl-C and
    ``child.sendcontrol('g')`` sends Ctrl-G (ASCII 7, bell). Returns the
    number of bytes written. See also sendintr() and sendeof().
    '''
    # The underlying ptyprocess object performs the actual translation
    # and write; it reports both the byte count and the raw byte sent.
    count, raw = self.ptyproc.sendcontrol(char)
    self._log_control(raw)
    return count
598
+
599
def sendeof(self):
    '''Send an EOF character to the child.

    The EOF character flushes the pending parent output buffer to the
    waiting child without waiting for end-of-line. If it is the first
    character of a line, the child's read() returns 0, signifying
    end-of-file -- so for the expected effect, call this at the start of
    a line. No newline is sent; that is the caller's responsibility.
    '''
    # Delegate to ptyprocess, then log the raw EOF byte it actually wrote.
    _unused, raw = self.ptyproc.sendeof()
    self._log_control(raw)
611
+
612
def sendintr(self):
    '''Send a SIGINT (interrupt character) to the child.

    Unlike sendeof(), this does not need to be the first character on a
    line.
    '''
    _unused, raw = self.ptyproc.sendintr()
    self._log_control(raw)
618
+
619
@property
def flag_eof(self):
    # Delegate to the underlying ptyprocess object so that this spawn
    # instance and ptyproc always agree on whether EOF has been seen.
    return self.ptyproc.flag_eof

@flag_eof.setter
def flag_eof(self, value):
    self.ptyproc.flag_eof = value
626
+
627
def eof(self):
    '''Report whether the EOF exception was ever raised for this child.'''
    # flag_eof is a property backed by the underlying ptyprocess object.
    return self.flag_eof
631
+
632
def terminate(self, force=False):
    '''This forces a child process to terminate. It starts nicely with
    SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
    returns True if the child was terminated. This returns False if the
    child could not be terminated. '''

    if not self.isalive():
        return True
    try:
        # Escalate gently: SIGHUP first, then SIGCONT (which wakes a
        # stopped child so later signals get delivered), then SIGINT,
        # pausing after each so the child has a chance to exit cleanly.
        self.kill(signal.SIGHUP)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        self.kill(signal.SIGCONT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        self.kill(signal.SIGINT)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        if force:
            # Last resort: SIGKILL cannot be caught or ignored.
            self.kill(signal.SIGKILL)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            else:
                return False
        return False
    except OSError:
        # I think there are kernel timing issues that sometimes cause
        # this to happen. I think isalive() reports True, but the
        # process is dead to the kernel.
        # Make one last attempt to see if the kernel is up to date.
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        else:
            return False
671
+
672
def wait(self):
    '''This waits until the child exits. This is a blocking call. This will
    not read any data from the child, so this will block forever if the
    child has unread output and has terminated. In other words, the child
    may have printed output then called exit(), but, the child is
    technically still alive until its output is read by the parent.

    This method is non-blocking if :meth:`wait` has already been called
    previously or :meth:`isalive` method returns False. It simply returns
    the previously determined exit status.
    '''

    ptyproc = self.ptyproc
    with _wrap_ptyprocess_err():
        # exception may occur if "Is some other process attempting
        # "job control with our child pid?"
        exitstatus = ptyproc.wait()
    # Mirror the ptyprocess bookkeeping onto this spawn instance so the
    # usual status attributes stay in sync with the real child state.
    self.status = ptyproc.status
    self.exitstatus = ptyproc.exitstatus
    self.signalstatus = ptyproc.signalstatus
    self.terminated = True

    return exitstatus
695
+
696
def isalive(self):
    '''This tests if the child process is running or not. This is
    non-blocking. If the child was terminated then this will read the
    exitstatus or signalstatus of the child. This returns True if the child
    process appears to be running or False if not. It can take literally
    SECONDS for Solaris to return the right status. '''

    ptyproc = self.ptyproc
    with _wrap_ptyprocess_err():
        alive = ptyproc.isalive()

    if not alive:
        # Child is gone: capture its final status once so subsequent
        # queries on this spawn instance see a consistent snapshot.
        self.status = ptyproc.status
        self.exitstatus = ptyproc.exitstatus
        self.signalstatus = ptyproc.signalstatus
        self.terminated = True

    return alive
714
+
715
def kill(self, sig):
    '''Send the given signal to the child application.

    In keeping with UNIX tradition the name is misleading: it only kills
    the child if you send a killing signal.
    '''
    # Same as os.kill, but with the child's pid supplied for you, and
    # guarded so a signal is never sent to an already-dead process.
    if not self.isalive():
        return
    os.kill(self.pid, sig)
724
+
725
def getwinsize(self):
    '''Return the child tty's terminal window size as a (rows, cols) tuple.'''
    # The underlying ptyprocess object owns the tty, so delegate to it.
    return self.ptyproc.getwinsize()
729
+
730
def setwinsize(self, rows, cols):
    '''Set the terminal window size of the child tty.

    This causes a SIGWINCH signal to be sent to the child. It does not
    change the physical window size; it changes the size reported to
    TTY-aware applications (vi, curses, ...) that respond to SIGWINCH.
    '''
    # Delegate to the ptyprocess object, which performs the ioctl.
    return self.ptyproc.setwinsize(rows, cols)
737
+
738
+
739
def interact(self, escape_character=chr(29),
        input_filter=None, output_filter=None):

    '''This gives control of the child process to the interactive user (the
    human at the keyboard). Keystrokes are sent to the child process, and
    the stdout and stderr output of the child process is printed. This
    simply echos the child stdout and child stderr to the real stdout and
    it echos the real stdin to the child stdin. When the user types the
    escape_character this method will return None. The escape_character
    will not be transmitted. The default for escape_character is
    entered as ``Ctrl - ]``, the very same as BSD telnet. To prevent
    escaping, escape_character may be set to None.

    If a logfile is specified, then the data sent and received from the
    child process in interact mode is duplicated to the given log.

    You may pass in optional input and output filter functions. These
    functions should take bytes array and return bytes array too. Even
    with ``encoding='utf-8'`` support, meth:`interact` will always pass
    input_filter and output_filter bytes. You may need to wrap your
    function to decode and encode back to UTF-8.

    The output_filter will be passed all the output from the child process.
    The input_filter will be passed all the keyboard input from the user.
    The input_filter is run BEFORE the check for the escape_character.

    Note that if you change the window size of the parent the SIGWINCH
    signal will not be passed through to the child. If you want the child
    window size to change when the parent's window size changes then do
    something like the following example::

        import pexpect, struct, fcntl, termios, signal, sys
        def sigwinch_passthrough (sig, data):
            s = struct.pack("HHHH", 0, 0, 0, 0)
            a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
                termios.TIOCGWINSZ , s))
            if not p.closed:
                p.setwinsize(a[0],a[1])

        # Note this 'p' is global and used in sigwinch_passthrough.
        p = pexpect.spawn('/bin/bash')
        signal.signal(signal.SIGWINCH, sigwinch_passthrough)
        p.interact()
    '''

    # Flush the buffer.
    self.write_to_stdout(self.buffer)
    self.stdout.flush()
    self._buffer = self.buffer_type()
    # Put the controlling terminal into raw mode so each keystroke is
    # forwarded immediately; remember the old settings for restoration.
    mode = tty.tcgetattr(self.STDIN_FILENO)
    tty.setraw(self.STDIN_FILENO)
    if escape_character is not None and PY3:
        # The copy loop works on bytes; latin-1 maps code points 0-255 1:1.
        escape_character = escape_character.encode('latin-1')
    try:
        self.__interact_copy(escape_character, input_filter, output_filter)
    finally:
        # Always restore the user's terminal settings, even on error.
        tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
796
+
797
def __interact_writen(self, fd, data):
    '''Write all of *data* to *fd*, looping over partial writes.
    Used by the interact() method.
    '''
    remaining = data
    while remaining != b'' and self.isalive():
        written = os.write(fd, remaining)
        remaining = remaining[written:]
804
+
805
def __interact_read(self, fd):
    '''Read up to 1000 bytes from *fd*. Used by the interact() method.'''
    return os.read(fd, 1000)
810
+
811
def __interact_copy(
    self, escape_character=None, input_filter=None, output_filter=None
):

    '''This is used by the interact() method.

    Copy loop: multiplex between the child fd and the user's stdin until
    the child dies, EOF is seen, or the escape character is typed.
    '''

    while self.isalive():
        # Wait until either the child or the keyboard has data,
        # restarting transparently on EINTR.
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd, self.STDIN_FILENO])
        else:
            r, w, e = select_ignore_interrupts(
                [self.child_fd, self.STDIN_FILENO], [], []
            )
        if self.child_fd in r:
            try:
                data = self.__interact_read(self.child_fd)
            except OSError as err:
                if err.args[0] == errno.EIO:
                    # Linux-style EOF
                    break
                raise
            if data == b'':
                # BSD-style EOF
                break
            if output_filter:
                data = output_filter(data)
            self._log(data, 'read')
            os.write(self.STDOUT_FILENO, data)
        if self.STDIN_FILENO in r:
            data = self.__interact_read(self.STDIN_FILENO)
            # The input filter runs BEFORE the escape-character check.
            if input_filter:
                data = input_filter(data)
            i = -1
            if escape_character is not None:
                i = data.rfind(escape_character)
            if i != -1:
                # Forward everything typed before the escape character,
                # then leave interact mode without sending the escape.
                data = data[:i]
                if data:
                    self._log(data, 'send')
                self.__interact_writen(self.child_fd, data)
                break
            self._log(data, 'send')
            self.__interact_writen(self.child_fd, data)
855
+
856
+
857
def spawnu(*args, **kwargs):
    """Deprecated: pass encoding to spawn() instead."""
    # Force unicode mode unless the caller explicitly chose an encoding.
    if 'encoding' not in kwargs:
        kwargs['encoding'] = 'utf-8'
    return spawn(*args, **kwargs)
evalkit_internvl/lib/python3.10/site-packages/pexpect/pxssh.py ADDED
@@ -0,0 +1,540 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''This class extends pexpect.spawn to specialize setting up SSH connections.
2
+ This adds methods for login, logout, and expecting the shell prompt.
3
+
4
+ PEXPECT LICENSE
5
+
6
+ This license is approved by the OSI and FSF as GPL-compatible.
7
+ http://opensource.org/licenses/isc-license.txt
8
+
9
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
10
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
11
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
12
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
13
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
+
21
+ '''
22
+
23
+ from pexpect import ExceptionPexpect, TIMEOUT, EOF, spawn
24
+ import time
25
+ import os
26
+ import sys
27
+ import re
28
+
29
+ __all__ = ['ExceptionPxssh', 'pxssh']
30
+
31
+ # Exception classes used by this module.
32
class ExceptionPxssh(ExceptionPexpect):
    '''Raised for pxssh exceptions.

    Subclasses :class:`pexpect.ExceptionPexpect`, so callers may catch
    either the pxssh-specific error or the generic pexpect error.
    '''
35
+
36
# On Python 3 the standard library provides shlex.quote; on Python 2 fall
# back to a local re-implementation of the same algorithm.
if sys.version_info > (3, 0):
    from shlex import quote
else:
    _find_unsafe = re.compile(r'[^\w@%+=:,./-]').search

    def quote(s):
        """Return a shell-escaped version of the string *s*."""
        # An empty string must still produce an argument.
        if not s:
            return "''"
        # Strings made only of safe characters need no quoting at all.
        if _find_unsafe(s) is None:
            return s

        # Wrap in single quotes; each embedded single quote closes the
        # quoting, is emitted inside double quotes, then reopens it:
        # the string $'b is thus quoted as '$'"'"'b'
        escaped = s.replace("'", "'\"'\"'")
        return "'" + escaped + "'"
51
+
52
class pxssh(spawn):
    '''This class extends pexpect.spawn to specialize setting up SSH
    connections. This adds methods for login, logout, and expecting the shell
    prompt. It does various tricky things to handle many situations in the SSH
    login process. For example, if the session is your first login, then pxssh
    automatically accepts the remote certificate; or if you have public key
    authentication setup then pxssh won't wait for the password prompt.

    pxssh uses the shell prompt to synchronize output from the remote host. In
    order to make this more robust it sets the shell prompt to something more
    unique than just $ or #. This should work on most Borne/Bash or Csh style
    shells.

    Example that runs a few commands on a remote server and prints the result::

        from pexpect import pxssh
        import getpass
        try:
            s = pxssh.pxssh()
            hostname = raw_input('hostname: ')
            username = raw_input('username: ')
            password = getpass.getpass('password: ')
            s.login(hostname, username, password)
            s.sendline('uptime')   # run a command
            s.prompt()             # match the prompt
            print(s.before)        # print everything before the prompt.
            s.sendline('ls -l')
            s.prompt()
            print(s.before)
            s.sendline('df')
            s.prompt()
            print(s.before)
            s.logout()
        except pxssh.ExceptionPxssh as e:
            print("pxssh failed on login.")
            print(e)

    Example showing how to specify SSH options::

        from pexpect import pxssh
        s = pxssh.pxssh(options={
                            "StrictHostKeyChecking": "no",
                            "UserKnownHostsFile": "/dev/null"})
        ...

    Note that if you have ssh-agent running while doing development with pxssh
    then this can lead to a lot of confusion. Many X display managers (xdm,
    gdm, kdm, etc.) will automatically start a GUI agent. You may see a GUI
    dialog box popup asking for a password during development. You should turn
    off any key agents during testing. The 'force_password' attribute will turn
    off public key authentication. This will only work if the remote SSH server
    is configured to allow password logins. Example of using 'force_password'
    attribute::

        s = pxssh.pxssh()
        s.force_password = True
        hostname = raw_input('hostname: ')
        username = raw_input('username: ')
        password = getpass.getpass('password: ')
        s.login (hostname, username, password)

    `debug_command_string` is only for the test suite to confirm that the string
    generated for SSH is correct, using this will not allow you to do
    anything other than get a string back from `pxssh.pxssh.login()`.
    '''

    def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, cwd=None, env=None, ignore_sighup=True,
                 echo=True, options=None, encoding=None, codec_errors='strict',
                 debug_command_string=False, use_poll=False):
        # NOTE: ``options`` (and ``login()``'s ``ssh_tunnels``) used to be
        # mutable default arguments ({}), which Python shares across every
        # call. They now default to None and are replaced with a fresh dict
        # per instance; passing an explicit dict behaves exactly as before.

        spawn.__init__(self, None, timeout=timeout, maxread=maxread,
                       searchwindowsize=searchwindowsize, logfile=logfile,
                       cwd=cwd, env=env, ignore_sighup=ignore_sighup,
                       echo=echo, encoding=encoding,
                       codec_errors=codec_errors, use_poll=use_poll)

        self.name = '<pxssh>'

        #SUBTLE HACK ALERT! Note that the command that SETS the prompt uses a
        #slightly different string than the regular expression to match it. This
        #is because when you set the prompt the command will echo back, but we
        #don't want to match the echoed command. So if we make the set command
        #slightly different than the regex we eliminate the problem. To make the
        #set command different we add a backslash in front of $. The $ doesn't
        #need to be escaped, but it doesn't hurt and serves to make the set
        #prompt command different than the regex.

        # used to match the command-line prompt
        self.UNIQUE_PROMPT = r"\[PEXPECT\][\$\#] "
        self.PROMPT = self.UNIQUE_PROMPT

        # used to set shell command-line prompt to UNIQUE_PROMPT.
        self.PROMPT_SET_SH = r"PS1='[PEXPECT]\$ '"
        self.PROMPT_SET_CSH = r"set prompt='[PEXPECT]\$ '"
        self.PROMPT_SET_ZSH = "prompt restore;\nPS1='[PEXPECT]%(!.#.$) '"
        self.SSH_OPTS = (" -o 'PubkeyAuthentication=no'")
        # Disabling host key checking, makes you vulnerable to MITM attacks.
        # + " -o 'StrictHostKeyChecking=no'"
        # + " -o 'UserKnownHostsFile /dev/null' ")
        # Disabling X11 forwarding gets rid of the annoying SSH_ASKPASS from
        # displaying a GUI password dialog. I have not figured out how to
        # disable only SSH_ASKPASS without also disabling X11 forwarding.
        # Unsetting SSH_ASKPASS on the remote side doesn't disable it! Annoying!
        #self.SSH_OPTS = "-x -o 'PubkeyAuthentication=no'"
        self.force_password = False

        self.debug_command_string = debug_command_string

        # User defined SSH options, eg,
        # ssh.options = dict(StrictHostKeyChecking="no",UserKnownHostsFile="/dev/null")
        self.options = {} if options is None else options

    def levenshtein_distance(self, a, b):
        '''This calculates the Levenshtein distance between a and b.
        '''

        n, m = len(a), len(b)
        if n > m:
            # Ensure a is the shorter string so the row buffer is minimal.
            a,b = b,a
            n,m = m,n
        current = range(n+1)
        for i in range(1,m+1):
            previous, current = current, [i]+[0]*n
            for j in range(1,n+1):
                add, delete = previous[j]+1, current[j-1]+1
                change = previous[j-1]
                if a[j-1] != b[i-1]:
                    change = change + 1
                current[j] = min(add, delete, change)
        return current[n]

    def try_read_prompt(self, timeout_multiplier):
        '''This facilitates using communication timeouts to perform
        synchronization as quickly as possible, while supporting high latency
        connections with a tunable worst case performance. Fast connections
        should be read almost immediately. Worst case performance for this
        method is timeout_multiplier * 3 seconds.
        '''

        # maximum time allowed to read the first response
        first_char_timeout = timeout_multiplier * 0.5

        # maximum time allowed between subsequent characters
        inter_char_timeout = timeout_multiplier * 0.1

        # maximum time for reading the entire prompt
        total_timeout = timeout_multiplier * 3.0

        prompt = self.string_type()
        begin = time.time()
        expired = 0.0
        timeout = first_char_timeout

        while expired < total_timeout:
            try:
                prompt += self.read_nonblocking(size=1, timeout=timeout)
                expired = time.time() - begin  # updated total time expired
                timeout = inter_char_timeout
            except TIMEOUT:
                break

        return prompt

    def sync_original_prompt(self, sync_multiplier=1.0):
        '''This attempts to find the prompt. Basically, press enter and record
        the response; press enter again and record the response; if the two
        responses are similar then assume we are at the original prompt.
        This can be a slow function. Worst case with the default sync_multiplier
        can take 12 seconds. Low latency connections are more likely to fail
        with a low sync_multiplier. Best case sync time gets worse with a
        high sync multiplier (500 ms with default). '''

        # All of these timing pace values are magic.
        # I came up with these based on what seemed reliable for
        # connecting to a heavily loaded machine I have.
        self.sendline()
        time.sleep(0.1)

        try:
            # Clear the buffer before getting the prompt.
            self.try_read_prompt(sync_multiplier)
        except TIMEOUT:
            pass

        self.sendline()
        x = self.try_read_prompt(sync_multiplier)

        self.sendline()
        a = self.try_read_prompt(sync_multiplier)

        self.sendline()
        b = self.try_read_prompt(sync_multiplier)

        # Two consecutive prompt reads that are "similar enough"
        # (Levenshtein distance under 40% of the prompt length) are taken
        # as evidence that we are looking at a stable shell prompt.
        ld = self.levenshtein_distance(a,b)
        len_a = len(a)
        if len_a == 0:
            return False
        if float(ld)/len_a < 0.4:
            return True
        return False

    ### TODO: This is getting messy and I'm pretty sure this isn't perfect.
    ### TODO: I need to draw a flow chart for this.
    ### TODO: Unit tests for SSH tunnels, remote SSH command exec, disabling original prompt sync
    def login(self, server, username=None, password='', terminal_type='ansi',
              original_prompt=r"[#$]", login_timeout=10, port=None,
              auto_prompt_reset=True, ssh_key=None, quiet=True,
              sync_multiplier=1, check_local_ip=True,
              password_regex=r'(?i)(?:password:)|(?:passphrase for key)',
              ssh_tunnels=None, spawn_local_ssh=True,
              sync_original_prompt=True, ssh_config=None, cmd='ssh'):
        '''This logs the user into the given server.

        It uses 'original_prompt' to try to find the prompt right after login.
        When it finds the prompt it immediately tries to reset the prompt to
        something more easily matched. The default 'original_prompt' is very
        optimistic and is easily fooled. It's more reliable to try to match the original
        prompt as exactly as possible to prevent false matches by server
        strings such as the "Message Of The Day". On many systems you can
        disable the MOTD on the remote server by creating a zero-length file
        called :file:`~/.hushlogin` on the remote server. If a prompt cannot be found
        then this will not necessarily cause the login to fail. In the case of
        a timeout when looking for the prompt we assume that the original
        prompt was so weird that we could not match it, so we use a few tricks
        to guess when we have reached the prompt. Then we hope for the best and
        blindly try to reset the prompt to something more unique. If that fails
        then login() raises an :class:`ExceptionPxssh` exception.

        In some situations it is not possible or desirable to reset the
        original prompt. In this case, pass ``auto_prompt_reset=False`` to
        inhibit setting the prompt to the UNIQUE_PROMPT. Remember that pxssh
        uses a unique prompt in the :meth:`prompt` method. If the original prompt is
        not reset then this will disable the :meth:`prompt` method unless you
        manually set the :attr:`PROMPT` attribute.

        Set ``password_regex`` if there is a MOTD message with `password` in it.
        Changing this is like playing in traffic, don't (p)expect it to match straight
        away.

        If you require to connect to another SSH server from the your original SSH
        connection set ``spawn_local_ssh`` to `False` and this will use your current
        session to do so. Setting this option to `False` and not having an active session
        will trigger an error.

        Set ``ssh_key`` to a file path to an SSH private key to use that SSH key
        for the session authentication.
        Set ``ssh_key`` to `True` to force passing the current SSH authentication socket
        to the desired ``hostname``.

        Set ``ssh_config`` to a file path string of an SSH client config file to pass that
        file to the client to handle itself. You may set any options you wish in here, however
        doing so will require you to post extra information that you may not want to if you
        run into issues.

        Alter the ``cmd`` to change the ssh client used, or to prepend it with network
        namespaces. For example ```cmd="ip netns exec vlan2 ssh"``` to execute the ssh in
        network namespace named ```vlan```.
        '''

        # Fix for a former mutable default argument: build a fresh dict here
        # instead of sharing one across calls.
        if ssh_tunnels is None:
            ssh_tunnels = {}

        session_regex_array = ["(?i)are you sure you want to continue connecting", original_prompt, password_regex, "(?i)permission denied", "(?i)terminal type", TIMEOUT]
        session_init_regex_array = []
        session_init_regex_array.extend(session_regex_array)
        session_init_regex_array.extend(["(?i)connection closed by remote host", EOF])

        ssh_options = ''.join([" -o '%s=%s'" % (o, v) for (o, v) in self.options.items()])
        if quiet:
            ssh_options = ssh_options + ' -q'
        if not check_local_ip:
            ssh_options = ssh_options + " -o'NoHostAuthenticationForLocalhost=yes'"
        if self.force_password:
            ssh_options = ssh_options + ' ' + self.SSH_OPTS
        if ssh_config is not None:
            if spawn_local_ssh and not os.path.isfile(ssh_config):
                raise ExceptionPxssh('SSH config does not exist or is not a file.')
            ssh_options = ssh_options + ' -F ' + ssh_config
        if port is not None:
            ssh_options = ssh_options + ' -p %s'%(str(port))
        if ssh_key is not None:
            # Allow forwarding our SSH key to the current session
            if ssh_key==True:
                ssh_options = ssh_options + ' -A'
            else:
                if spawn_local_ssh and not os.path.isfile(ssh_key):
                    raise ExceptionPxssh('private ssh key does not exist or is not a file.')
                ssh_options = ssh_options + ' -i %s' % (ssh_key)

        # SSH tunnels, make sure you know what you're putting into the lists
        # under each heading. Do not expect these to open 100% of the time,
        # The port you're requesting might be bound.
        #
        # The structure should be like this:
        # { 'local': ['2424:localhost:22'],  # Local SSH tunnels
        #   'remote': ['2525:localhost:22'], # Remote SSH tunnels
        #   'dynamic': [8888] }              # Dynamic/SOCKS tunnels
        if ssh_tunnels!={} and isinstance({},type(ssh_tunnels)):
            tunnel_types = {
                'local':'L',
                'remote':'R',
                'dynamic':'D'
            }
            for tunnel_type in tunnel_types:
                cmd_type = tunnel_types[tunnel_type]
                if tunnel_type in ssh_tunnels:
                    tunnels = ssh_tunnels[tunnel_type]
                    for tunnel in tunnels:
                        if spawn_local_ssh==False:
                            # Quote so the nested remote shell sees one arg.
                            tunnel = quote(str(tunnel))
                        ssh_options = ssh_options + ' -' + cmd_type + ' ' + str(tunnel)

        if username is not None:
            ssh_options = ssh_options + ' -l ' + username
        elif ssh_config is None:
            raise TypeError('login() needs either a username or an ssh_config')
        else:  # make sure ssh_config has an entry for the server with a username
            with open(ssh_config, 'rt') as f:
                lines = [l.strip() for l in f.readlines()]

            server_regex = r'^Host\s+%s\s*$' % server
            user_regex = r'^User\s+\w+\s*$'
            config_has_server = False
            server_has_username = False
            for line in lines:
                if not config_has_server and re.match(server_regex, line, re.IGNORECASE):
                    config_has_server = True
                elif config_has_server and 'hostname' in line.lower():
                    pass
                elif config_has_server and 'host' in line.lower():
                    server_has_username = False  # insurance
                    break  # we have left the relevant section
                elif config_has_server and re.match(user_regex, line, re.IGNORECASE):
                    server_has_username = True
                    break

            if lines:
                del line

            del lines

            if not config_has_server:
                raise TypeError('login() ssh_config has no Host entry for %s' % server)
            elif not server_has_username:
                raise TypeError('login() ssh_config has no user entry for %s' % server)

        cmd += " %s %s" % (ssh_options, server)
        if self.debug_command_string:
            return(cmd)

        # Are we asking for a local ssh command or to spawn one in another session?
        if spawn_local_ssh:
            spawn._spawn(self, cmd)
        else:
            self.sendline(cmd)

        # This does not distinguish between a remote server 'password' prompt
        # and a local ssh 'passphrase' prompt (for unlocking a private key).
        i = self.expect(session_init_regex_array, timeout=login_timeout)

        # First phase
        if i==0:
            # New certificate -- always accept it.
            # This is what you get if SSH does not have the remote host's
            # public key stored in the 'known_hosts' cache.
            self.sendline("yes")
            i = self.expect(session_regex_array)
        if i==2:  # password or passphrase
            self.sendline(password)
            i = self.expect(session_regex_array)
        if i==4:
            self.sendline(terminal_type)
            i = self.expect(session_regex_array)
        if i==7:
            self.close()
            raise ExceptionPxssh('Could not establish connection to host')

        # Second phase
        if i==0:
            # This is weird. This should not happen twice in a row.
            self.close()
            raise ExceptionPxssh('Weird error. Got "are you sure" prompt twice.')
        elif i==1:  # can occur if you have a public key pair set to authenticate.
            ### TODO: May NOT be OK if expect() got tricked and matched a false prompt.
            pass
        elif i==2:  # password prompt again
            # For incorrect passwords, some ssh servers will
            # ask for the password again, others return 'denied' right away.
            # If we get the password prompt again then this means
            # we didn't get the password right the first time.
            self.close()
            raise ExceptionPxssh('password refused')
        elif i==3:  # permission denied -- password was bad.
            self.close()
            raise ExceptionPxssh('permission denied')
        elif i==4:  # terminal type again? WTF?
            self.close()
            raise ExceptionPxssh('Weird error. Got "terminal type" prompt twice.')
        elif i==5:  # Timeout
            #This is tricky... I presume that we are at the command-line prompt.
            #It may be that the shell prompt was so weird that we couldn't match
            #it. Or it may be that we couldn't log in for some other reason. I
            #can't be sure, but it's safe to guess that we did login because if
            #I presume wrong and we are not logged in then this should be caught
            #later when I try to set the shell prompt.
            pass
        elif i==6:  # Connection closed by remote host
            self.close()
            raise ExceptionPxssh('connection closed')
        else:  # Unexpected
            self.close()
            raise ExceptionPxssh('unexpected login response')
        if sync_original_prompt:
            if not self.sync_original_prompt(sync_multiplier):
                self.close()
                raise ExceptionPxssh('could not synchronize with original prompt')
        # We appear to be in.
        # set shell prompt to something unique.
        if auto_prompt_reset:
            if not self.set_unique_prompt():
                self.close()
                raise ExceptionPxssh('could not set shell prompt '
                                     '(received: %r, expected: %r).' % (
                                         self.before, self.PROMPT,))
        return True

    def logout(self):
        '''Sends exit to the remote shell.

        If there are stopped jobs then this automatically sends exit twice.
        '''
        self.sendline("exit")
        index = self.expect([EOF, "(?i)there are stopped jobs"])
        if index==1:
            self.sendline("exit")
            self.expect(EOF)
        self.close()

    def prompt(self, timeout=-1):
        '''Match the next shell prompt.

        This is little more than a short-cut to the :meth:`~pexpect.spawn.expect`
        method. Note that if you called :meth:`login` with
        ``auto_prompt_reset=False``, then before calling :meth:`prompt` you must
        set the :attr:`PROMPT` attribute to a regex that it will use for
        matching the prompt.

        Calling :meth:`prompt` will erase the contents of the :attr:`before`
        attribute even if no prompt is ever matched. If timeout is not given or
        it is set to -1 then self.timeout is used.

        :return: True if the shell prompt was matched, False if the timeout was
                 reached.
        '''

        if timeout == -1:
            timeout = self.timeout
        i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)
        if i==1:
            return False
        return True

    def set_unique_prompt(self):
        '''This sets the remote prompt to something more unique than ``#`` or ``$``.
        This makes it easier for the :meth:`prompt` method to match the shell prompt
        unambiguously. This method is called automatically by the :meth:`login`
        method, but you may want to call it manually if you somehow reset the
        shell prompt. For example, if you 'su' to a different user then you
        will need to manually reset the prompt. This sends shell commands to
        the remote host to set the prompt, so this assumes the remote host is
        ready to receive commands.

        Alternatively, you may use your own prompt pattern. In this case you
        should call :meth:`login` with ``auto_prompt_reset=False``; then set the
        :attr:`PROMPT` attribute to a regular expression. After that, the
        :meth:`prompt` method will try to match your prompt pattern.
        '''

        # Try sh-style first, then fall back to csh-style, then zsh-style;
        # each attempt is confirmed by matching the new unique prompt.
        self.sendline("unset PROMPT_COMMAND")
        self.sendline(self.PROMPT_SET_SH)  # sh-style
        i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
        if i == 0:  # csh-style
            self.sendline(self.PROMPT_SET_CSH)
            i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
            if i == 0:  # zsh-style
                self.sendline(self.PROMPT_SET_ZSH)
                i = self.expect([TIMEOUT, self.PROMPT], timeout=10)
                if i == 0:
                    return False
        return True
539
+
540
+ # vi:ts=4:sw=4:expandtab:ft=python:
evalkit_internvl/lib/python3.10/site-packages/pexpect/replwrap.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
2
+ """
3
+ import os.path
4
+ import signal
5
+ import sys
6
+
7
+ import pexpect
8
+
9
+ PY3 = (sys.version_info[0] >= 3)
10
+
11
+ if PY3:
12
+ basestring = str
13
+
14
+ PEXPECT_PROMPT = u'[PEXPECT_PROMPT>'
15
+ PEXPECT_CONTINUATION_PROMPT = u'[PEXPECT_PROMPT+'
16
+
17
class REPLWrapper(object):
    """Wrapper for a REPL.

    :param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
      in which a REPL has already been started, or a str command to start a new
      REPL process.
    :param str orig_prompt: The prompt to expect at first.
    :param str prompt_change: A command to change the prompt to something more
      unique. If this is ``None``, the prompt will not be changed. This will
      be formatted with the new and continuation prompts as positional
      parameters, so you can use ``{}`` style formatting to insert them into
      the command.
    :param str new_prompt: The more unique prompt to expect after the change.
    :param str extra_init_cmd: Commands to do extra initialisation, such as
      disabling pagers.
    """
    def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
                 new_prompt=PEXPECT_PROMPT,
                 continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
                 extra_init_cmd=None):
        if isinstance(cmd_or_spawn, basestring):
            # Given a command string: start the child ourselves, with echo
            # off so our own input is not repeated back in the output.
            self.child = pexpect.spawn(cmd_or_spawn, echo=False, encoding='utf-8')
        else:
            self.child = cmd_or_spawn
        if self.child.echo:
            # Existing spawn instance has echo enabled, disable it
            # to prevent our input from being repeated to output.
            self.child.setecho(False)
            self.child.waitnoecho()

        if prompt_change is None:
            self.prompt = orig_prompt
        else:
            # Install the more unique prompt, then match on it from now on.
            self.set_prompt(orig_prompt,
                        prompt_change.format(new_prompt, continuation_prompt))
            self.prompt = new_prompt
        self.continuation_prompt = continuation_prompt

        # Consume output up to the first prompt so run_command() starts
        # from a clean state.
        self._expect_prompt()

        if extra_init_cmd is not None:
            self.run_command(extra_init_cmd)

    def set_prompt(self, orig_prompt, prompt_change):
        # Wait for the current prompt, then send the command that switches
        # the shell/REPL to the new prompt.
        self.child.expect(orig_prompt)
        self.child.sendline(prompt_change)

    def _expect_prompt(self, timeout=-1, async_=False):
        # Returns 0 when the main prompt matched, 1 for the continuation
        # prompt (i.e. the REPL is waiting for more input).
        return self.child.expect_exact([self.prompt, self.continuation_prompt],
                                       timeout=timeout, async_=async_)

    def run_command(self, command, timeout=-1, async_=False):
        """Send a command to the REPL, wait for and return output.

        :param str command: The command to send. Trailing newlines are not needed.
            This should be a complete block of input that will trigger execution;
            if a continuation prompt is found after sending input, :exc:`ValueError`
            will be raised.
        :param int timeout: How long to wait for the next prompt. -1 means the
            default from the :class:`pexpect.spawn` object (default 30 seconds).
            None means to wait indefinitely.
        :param bool async_: On Python 3.4, or Python 3.3 with asyncio
            installed, passing ``async_=True`` will make this return an
            :mod:`asyncio` Future, which you can yield from to get the same
            result that this method would normally give directly.
        """
        # Split up multiline commands and feed them in bit-by-bit
        cmdlines = command.splitlines()
        # splitlines ignores trailing newlines - add it back in manually
        if command.endswith('\n'):
            cmdlines.append('')
        if not cmdlines:
            raise ValueError("No command was given")

        if async_:
            from ._async import repl_run_command_async
            return repl_run_command_async(self, cmdlines, timeout)

        res = []
        self.child.sendline(cmdlines[0])
        for line in cmdlines[1:]:
            # Each intermediate line should produce a prompt (main or
            # continuation); collect the output seen before it.
            self._expect_prompt(timeout=timeout)
            res.append(self.child.before)
            self.child.sendline(line)

        # Command was fully submitted, now wait for the next prompt
        if self._expect_prompt(timeout=timeout) == 1:
            # We got the continuation prompt - command was incomplete.
            # Interrupt the pending input so the REPL returns to the main
            # prompt before we raise.
            self.child.kill(signal.SIGINT)
            self._expect_prompt(timeout=1)
            raise ValueError("Continuation prompt found - input was incomplete:\n"
                             + command)
        return u''.join(res + [self.child.before])
110
+
111
def python(command=sys.executable):
    """Start a Python shell and return a :class:`REPLWrapper` object."""
    prompt_cmd = u"import sys; sys.ps1={0!r}; sys.ps2={1!r}"
    return REPLWrapper(command, u">>> ", prompt_cmd)
114
+
115
def _repl_sh(command, args, non_printable_insert):
    """Shared helper for :func:`bash` and :func:`zsh`: spawn the shell and
    wrap it in a :class:`REPLWrapper` with unique, non-self-matching prompts."""
    child = pexpect.spawn(command, args, echo=False, encoding='utf-8')

    # If the user runs 'env', the value of PS1 will be in the output. To avoid
    # replwrap seeing that as the next prompt, we'll embed the marker characters
    # for invisible characters in the prompt; these show up when inspecting the
    # environment variable, but not when the shell displays the prompt.
    cut = 5
    ps1 = PEXPECT_PROMPT[:cut] + non_printable_insert + PEXPECT_PROMPT[cut:]
    ps2 = (PEXPECT_CONTINUATION_PROMPT[:cut] + non_printable_insert
           + PEXPECT_CONTINUATION_PROMPT[cut:])
    prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(ps1, ps2)

    return REPLWrapper(child, u'\\$', prompt_change,
                       extra_init_cmd="export PAGER=cat")
128
+
129
def bash(command="bash"):
    """Start a bash shell and return a :class:`REPLWrapper` object."""
    rcfile = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
    return _repl_sh(command, ['--rcfile', rcfile], non_printable_insert='\\[\\]')
133
+
134
def zsh(command="zsh", args=("--no-rcs", "-V", "+Z")):
    """Start a zsh shell and return a :class:`REPLWrapper` object."""
    arg_list = list(args)
    return _repl_sh(command, arg_list, non_printable_insert='%(!..)')
evalkit_internvl/lib/python3.10/site-packages/pexpect/run.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import types
3
+
4
+ from .exceptions import EOF, TIMEOUT
5
+ from .pty_spawn import spawn
6
+
7
def run(command, timeout=30, withexitstatus=False, events=None,
        extra_args=None, logfile=None, cwd=None, env=None, **kwargs):

    '''
    This function runs the given command; waits for it to finish; then
    returns all output as a string. STDERR is included in output. If the full
    path to the command is not given then the path is searched.

    Note that lines are terminated by CR/LF (\\r\\n) combination even on
    UNIX-like systems because this is the standard for pseudottys. If you set
    'withexitstatus' to true, then run will return a tuple of (command_output,
    exitstatus). If 'withexitstatus' is false then this returns just
    command_output.

    The run() function can often be used instead of creating a spawn instance.
    For example, the following code uses spawn::

        from pexpect import *
        child = spawn('scp foo user@example.com:.')
        child.expect('(?i)password')
        child.sendline(mypassword)

    The previous code can be replace with the following::

        from pexpect import *
        run('scp foo user@example.com:.', events={'(?i)password': mypassword})

    **Examples**

    Start the apache daemon on the local machine::

        from pexpect import *
        run("/usr/local/apache/bin/apachectl start")

    Check in a file using SVN::

        from pexpect import *
        run("svn ci -m 'automatic commit' my_file.py")

    Run a command and capture exit status::

        from pexpect import *
        (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)

    The following will run SSH and execute 'ls -l' on the remote machine. The
    password 'secret' will be sent if the '(?i)password' pattern is ever seen::

        run("ssh username@machine.example.com 'ls -l'",
            events={'(?i)password':'secret\\n'})

    This will start mencoder to rip a video from DVD. This will also display
    progress ticks every 5 seconds as it runs. For example::

        from pexpect import *
        def print_ticks(d):
            print d['event_count'],
        run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
            events={TIMEOUT:print_ticks}, timeout=5)

    The 'events' argument should be either a dictionary or a tuple list that
    contains patterns and responses. Whenever one of the patterns is seen
    in the command output, run() will send the associated response string.
    So, run() in the above example can be also written as::

        run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
            events=[(TIMEOUT,print_ticks)], timeout=5)

    Use a tuple list for events if the command output requires a delicate
    control over what pattern should be matched, since the tuple list is passed
    to pexpect() as its pattern list, with the order of patterns preserved.

    Note that you should put newlines in your string if Enter is necessary.

    Like the example above, the responses may also contain a callback, either
    a function or method. It should accept a dictionary value as an argument.
    The dictionary contains all the locals from the run() function, so you can
    access the child spawn object or any other variable defined in run()
    (event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process. Otherwise run() continues
    until the next event. A callback may also return a string which will be
    sent to the child. 'extra_args' is not used by directly run(). It provides
    a way to pass data to a callback function through run() through the locals
    dictionary passed to a callback.

    Like :class:`spawn`, passing *encoding* will make it work with unicode
    instead of bytes. You can pass *codec_errors* to control how errors in
    encoding and decoding are handled.
    '''
    # timeout == -1 means "use spawn's default timeout" rather than passing
    # -1 through to the spawn constructor.
    if timeout == -1:
        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
                      **kwargs)
    else:
        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
                      cwd=cwd, env=env, **kwargs)
    # Normalize events into parallel pattern/response lists. A list of
    # (pattern, response) pairs preserves matching priority; a dict does not
    # guarantee ordering.
    if isinstance(events, list):
        patterns = [x for x, y in events]
        responses = [y for x, y in events]
    elif isinstance(events, dict):
        patterns = list(events.keys())
        responses = list(events.values())
    else:
        # This assumes EOF or TIMEOUT will eventually cause run to terminate.
        patterns = None
        responses = None
    child_result_list = []
    event_count = 0
    # Main event loop: accumulate output and react to matched events until
    # the child hits EOF or times out.
    while True:
        try:
            index = child.expect(patterns)
            if isinstance(child.after, child.allowed_string_types):
                child_result_list.append(child.before + child.after)
            else:
                # child.after may have been a TIMEOUT or EOF,
                # which we don't want appended to the list.
                child_result_list.append(child.before)
            if isinstance(responses[index], child.allowed_string_types):
                # String response: send it straight to the child.
                child.send(responses[index])
            elif (isinstance(responses[index], types.FunctionType) or
                  isinstance(responses[index], types.MethodType)):
                # Callback response: it sees run()'s locals; a string return
                # is sent to the child, any other truthy return stops run().
                callback_result = responses[index](locals())
                sys.stdout.flush()
                if isinstance(callback_result, child.allowed_string_types):
                    child.send(callback_result)
                elif callback_result:
                    break
            else:
                raise TypeError("parameter `event' at index {index} must be "
                                "a string, method, or function: {value!r}"
                                .format(index=index, value=responses[index]))
            event_count = event_count + 1
        except TIMEOUT:
            child_result_list.append(child.before)
            break
        except EOF:
            child_result_list.append(child.before)
            break
    # string_type() gives '' or b'' to match the child's str/bytes mode.
    child_result = child.string_type().join(child_result_list)
    if withexitstatus:
        child.close()
        return (child_result, child.exitstatus)
    else:
        return child_result
149
+
150
def runu(command, timeout=30, withexitstatus=False, events=None,
         extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
    """Deprecated: pass encoding to run() instead.
    """
    # Force unicode mode unless the caller already chose an encoding.
    if 'encoding' not in kwargs:
        kwargs['encoding'] = 'utf-8'
    return run(command, timeout=timeout, withexitstatus=withexitstatus,
               events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
               env=env, **kwargs)
evalkit_internvl/lib/python3.10/site-packages/pexpect/screen.py ADDED
@@ -0,0 +1,431 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''This implements a virtual screen. This is used to support ANSI terminal
2
+ emulation. The screen representation and state is implemented in this class.
3
+ Most of the methods are inspired by ANSI screen control codes. The
4
+ :class:`~pexpect.ANSI.ANSI` class extends this class to add parsing of ANSI
5
+ escape codes.
6
+
7
+ PEXPECT LICENSE
8
+
9
+ This license is approved by the OSI and FSF as GPL-compatible.
10
+ http://opensource.org/licenses/isc-license.txt
11
+
12
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
13
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
14
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
15
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
16
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
17
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
18
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
19
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
20
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
+
24
+ '''
25
+
26
+ import codecs
27
+ import copy
28
+ import sys
29
+
30
+ import warnings
31
+
32
+ warnings.warn(("pexpect.screen and pexpect.ANSI are deprecated. "
33
+ "We recommend using pyte to emulate a terminal screen: "
34
+ "https://pypi.python.org/pypi/pyte"),
35
+ stacklevel=2)
36
+
37
+ NUL = 0 # Fill character; ignored on input.
38
+ ENQ = 5 # Transmit answerback message.
39
+ BEL = 7 # Ring the bell.
40
+ BS = 8 # Move cursor left.
41
+ HT = 9 # Move cursor to next tab stop.
42
+ LF = 10 # Line feed.
43
+ VT = 11 # Same as LF.
44
+ FF = 12 # Same as LF.
45
+ CR = 13 # Move cursor to left margin or newline.
46
+ SO = 14 # Invoke G1 character set.
47
+ SI = 15 # Invoke G0 character set.
48
+ XON = 17 # Resume transmission.
49
+ XOFF = 19 # Halt transmission.
50
+ CAN = 24 # Cancel escape sequence.
51
+ SUB = 26 # Same as CAN.
52
+ ESC = 27 # Introduce a control sequence.
53
+ DEL = 127 # Fill character; ignored on input.
54
+ SPACE = u' ' # Space or blank character.
55
+
56
+ PY3 = (sys.version_info[0] >= 3)
57
+ if PY3:
58
+ unicode = str
59
+
60
def constrain (n, min, max):
    '''This returns a number, n constrained to the min and max bounds. '''
    # NOTE: the parameter names shadow the builtins min/max; they are part
    # of the public signature and are kept for compatibility.
    if n < min:
        result = min
    elif n > max:
        result = max
    else:
        result = n
    return result
69
+
70
class screen:
    '''This object maintains the state of a virtual text screen as a
    rectangular array. This maintains a virtual cursor position and handles
    scrolling as characters are added. This supports most of the methods needed
    by an ANSI text screen. Row and column indexes are 1-based (not zero-based,
    like arrays).

    Characters are represented internally using unicode. Methods that accept
    input characters, when passed 'bytes' (which in Python 2 is equivalent to
    'str'), convert them from the encoding specified in the 'encoding'
    parameter to the constructor. Methods that return screen contents return
    unicode strings, with the exception of __str__() under Python 2. Passing
    ``encoding=None`` limits the API to only accept unicode input, so passing
    bytes in will raise :exc:`TypeError`.
    '''
    def __init__(self, r=24, c=80, encoding='latin-1', encoding_errors='replace'):
        '''This initializes a blank screen of the given dimensions.'''

        self.rows = r
        self.cols = c
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        if encoding is not None:
            # Incremental decoder: multi-byte sequences split across calls
            # still decode correctly.
            self.decoder = codecs.getincrementaldecoder(encoding)(encoding_errors)
        else:
            self.decoder = None
        self.cur_r = 1
        self.cur_c = 1
        self.cur_saved_r = 1
        self.cur_saved_c = 1
        self.scroll_row_start = 1
        self.scroll_row_end = self.rows
        # Screen contents: rows x cols grid of single-character strings.
        self.w = [[SPACE] * self.cols for _ in range(self.rows)]

    def _decode(self, s):
        '''This converts from the external coding system (as passed to
        the constructor) to the internal one (unicode). '''
        if self.decoder is not None:
            return self.decoder.decode(s)
        else:
            raise TypeError("This screen was constructed with encoding=None, "
                            "so it does not handle bytes.")

    def _unicode(self):
        '''This returns a printable representation of the screen as a unicode
        string (which, under Python 3.x, is the same as 'str'). The end of each
        screen line is terminated by a newline.'''

        return u'\n'.join([u''.join(c) for c in self.w])

    if PY3:
        __str__ = _unicode
    else:
        __unicode__ = _unicode

        def __str__(self):
            '''This returns a printable representation of the screen. The end of
            each screen line is terminated by a newline. '''
            encoding = self.encoding or 'ascii'
            return self._unicode().encode(encoding, 'replace')

    def dump(self):
        '''This returns a copy of the screen as a unicode string. This is similar to
        __str__/__unicode__ except that lines are not terminated with line
        feeds.'''

        return u''.join([u''.join(c) for c in self.w])

    def pretty(self):
        '''This returns a copy of the screen as a unicode string with an ASCII
        text box around the screen border. This is similar to
        __str__/__unicode__ except that it adds a box.'''

        top_bot = u'+' + u'-' * self.cols + u'+\n'
        return top_bot + u'\n'.join([u'|' + line + u'|' for line in unicode(self).split(u'\n')]) + u'\n' + top_bot

    def fill(self, ch=SPACE):
        '''Fill the entire screen with character *ch*.'''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        self.fill_region(1, 1, self.rows, self.cols, ch)

    def fill_region(self, rs, cs, re, ce, ch=SPACE):
        '''Fill the rectangle (rs,cs)-(re,ce), inclusive, with *ch*.
        Coordinates are clamped to the screen and may be given in either
        order.'''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        rs = constrain(rs, 1, self.rows)
        re = constrain(re, 1, self.rows)
        cs = constrain(cs, 1, self.cols)
        ce = constrain(ce, 1, self.cols)
        if rs > re:
            rs, re = re, rs
        if cs > ce:
            cs, ce = ce, cs
        for r in range(rs, re + 1):
            for c in range(cs, ce + 1):
                self.put_abs(r, c, ch)

    def cr(self):
        '''This moves the cursor to the beginning (col 1) of the current row.
        '''

        self.cursor_home(self.cur_r, 1)

    def lf(self):
        '''This moves the cursor down with scrolling.
        '''

        old_r = self.cur_r
        self.cursor_down()
        if old_r == self.cur_r:
            # Cursor was already on the last row: scroll instead and clear
            # the freshly exposed line.
            self.scroll_up()
            self.erase_line()

    def crlf(self):
        '''This advances the cursor with CRLF properties.
        The cursor will line wrap and the screen may scroll.
        '''

        self.cr()
        self.lf()

    def newline(self):
        '''This is an alias for crlf().
        '''

        self.crlf()

    def put_abs(self, r, c, ch):
        '''Screen array starts at 1 index.'''

        r = constrain(r, 1, self.rows)
        c = constrain(c, 1, self.cols)
        if isinstance(ch, bytes):
            ch = self._decode(ch)[0]
        else:
            # Only a single character is stored per cell.
            ch = ch[0]
        self.w[r - 1][c - 1] = ch

    def put(self, ch):
        '''This puts a characters at the current cursor position.
        '''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        self.put_abs(self.cur_r, self.cur_c, ch)

    def insert_abs(self, r, c, ch):
        '''This inserts a character at (r,c). Everything under
        and to the right is shifted right one character.
        The last character of the line is lost.
        '''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        r = constrain(r, 1, self.rows)
        c = constrain(c, 1, self.cols)
        # Shift right-to-left so no cell is overwritten before it is copied.
        for ci in range(self.cols, c, -1):
            self.put_abs(r, ci, self.get_abs(r, ci - 1))
        self.put_abs(r, c, ch)

    def insert(self, ch):
        '''Insert *ch* at the current cursor position (see insert_abs).'''

        if isinstance(ch, bytes):
            ch = self._decode(ch)

        self.insert_abs(self.cur_r, self.cur_c, ch)

    def get_abs(self, r, c):
        '''Return the character stored at (r,c), clamped to the screen.'''

        r = constrain(r, 1, self.rows)
        c = constrain(c, 1, self.cols)
        return self.w[r - 1][c - 1]

    def get(self):
        '''Return the character under the current cursor position.'''

        # Bug fix: the original computed the value but dropped it (no
        # `return`), so get() always returned None.
        return self.get_abs(self.cur_r, self.cur_c)

    def get_region(self, rs, cs, re, ce):
        '''This returns a list of lines representing the region.
        '''

        rs = constrain(rs, 1, self.rows)
        re = constrain(re, 1, self.rows)
        cs = constrain(cs, 1, self.cols)
        ce = constrain(ce, 1, self.cols)
        if rs > re:
            rs, re = re, rs
        if cs > ce:
            cs, ce = ce, cs
        sc = []
        for r in range(rs, re + 1):
            line = u''
            for c in range(cs, ce + 1):
                ch = self.get_abs(r, c)
                line = line + ch
            sc.append(line)
        return sc

    def cursor_constrain(self):
        '''This keeps the cursor within the screen area.
        '''

        self.cur_r = constrain(self.cur_r, 1, self.rows)
        self.cur_c = constrain(self.cur_c, 1, self.cols)

    def cursor_home(self, r=1, c=1):  # <ESC>[{ROW};{COLUMN}H
        '''Move the cursor to (r,c), clamped to the screen.'''

        self.cur_r = r
        self.cur_c = c
        self.cursor_constrain()

    def cursor_back(self, count=1):  # <ESC>[{COUNT}D (not confused with down)
        '''Move the cursor left by *count* columns.'''

        self.cur_c = self.cur_c - count
        self.cursor_constrain()

    def cursor_down(self, count=1):  # <ESC>[{COUNT}B (not confused with back)
        '''Move the cursor down by *count* rows (no scrolling).'''

        self.cur_r = self.cur_r + count
        self.cursor_constrain()

    def cursor_forward(self, count=1):  # <ESC>[{COUNT}C
        '''Move the cursor right by *count* columns.'''

        self.cur_c = self.cur_c + count
        self.cursor_constrain()

    def cursor_up(self, count=1):  # <ESC>[{COUNT}A
        '''Move the cursor up by *count* rows (no scrolling).'''

        self.cur_r = self.cur_r - count
        self.cursor_constrain()

    def cursor_up_reverse(self):  # <ESC> M (called RI -- Reverse Index)
        '''Move the cursor up one row, scrolling if already at the top.'''

        old_r = self.cur_r
        self.cursor_up()
        if old_r == self.cur_r:
            self.scroll_up()

    def cursor_force_position(self, r, c):  # <ESC>[{ROW};{COLUMN}f
        '''Identical to Cursor Home.'''

        self.cursor_home(r, c)

    def cursor_save(self):  # <ESC>[s
        '''Save current cursor position.'''

        self.cursor_save_attrs()

    def cursor_unsave(self):  # <ESC>[u
        '''Restores cursor position after a Save Cursor.'''

        self.cursor_restore_attrs()

    def cursor_save_attrs(self):  # <ESC>7
        '''Save current cursor position.'''

        self.cur_saved_r = self.cur_r
        self.cur_saved_c = self.cur_c

    def cursor_restore_attrs(self):  # <ESC>8
        '''Restores cursor position after a Save Cursor.'''

        self.cursor_home(self.cur_saved_r, self.cur_saved_c)

    def scroll_constrain(self):
        '''This keeps the scroll region within the screen region.'''

        if self.scroll_row_start <= 0:
            self.scroll_row_start = 1
        if self.scroll_row_end > self.rows:
            self.scroll_row_end = self.rows

    def scroll_screen(self):  # <ESC>[r
        '''Enable scrolling for entire display.'''

        self.scroll_row_start = 1
        self.scroll_row_end = self.rows

    def scroll_screen_rows(self, rs, re):  # <ESC>[{start};{end}r
        '''Enable scrolling from row {start} to row {end}.'''

        self.scroll_row_start = rs
        self.scroll_row_end = re
        self.scroll_constrain()

    def scroll_down(self):  # <ESC>D
        '''Scroll display down one line.'''

        # Screen is indexed from 1, but arrays are indexed from 0.
        s = self.scroll_row_start - 1
        e = self.scroll_row_end - 1
        self.w[s + 1:e + 1] = copy.deepcopy(self.w[s:e])

    def scroll_up(self):  # <ESC>M
        '''Scroll display up one line.'''

        # Screen is indexed from 1, but arrays are indexed from 0.
        s = self.scroll_row_start - 1
        e = self.scroll_row_end - 1
        self.w[s:e] = copy.deepcopy(self.w[s + 1:e + 1])

    def erase_end_of_line(self):  # <ESC>[0K -or- <ESC>[K
        '''Erases from the current cursor position to the end of the current
        line.'''

        self.fill_region(self.cur_r, self.cur_c, self.cur_r, self.cols)

    def erase_start_of_line(self):  # <ESC>[1K
        '''Erases from the current cursor position to the start of the current
        line.'''

        self.fill_region(self.cur_r, 1, self.cur_r, self.cur_c)

    def erase_line(self):  # <ESC>[2K
        '''Erases the entire current line.'''

        self.fill_region(self.cur_r, 1, self.cur_r, self.cols)

    def erase_down(self):  # <ESC>[0J -or- <ESC>[J
        '''Erases the screen from the current line down to the bottom of the
        screen.'''

        self.erase_end_of_line()
        self.fill_region(self.cur_r + 1, 1, self.rows, self.cols)

    def erase_up(self):  # <ESC>[1J
        '''Erases the screen from the current line up to the top of the
        screen.'''

        self.erase_start_of_line()
        self.fill_region(self.cur_r - 1, 1, 1, self.cols)

    def erase_screen(self):  # <ESC>[2J
        '''Erases the screen with the background color.'''

        self.fill()

    def set_tab(self):  # <ESC>H
        '''Sets a tab at the current position.'''

        pass

    def clear_tab(self):  # <ESC>[g
        '''Clears tab at the current position.'''

        pass

    def clear_all_tabs(self):  # <ESC>[3g
        '''Clears all tabs.'''

        pass

    # Insert line Esc [ Pn L
    # Delete line Esc [ Pn M
    # Delete character Esc [ Pn P
    # Scrolling region Esc [ Pn(top);Pn(bot) r
+
evalkit_internvl/lib/python3.10/site-packages/pexpect/socket_pexpect.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This is like :mod:`pexpect`, but it will work with any socket that you
2
+ pass it. You are responsible for opening and closing the socket.
3
+
4
+ PEXPECT LICENSE
5
+
6
+ This license is approved by the OSI and FSF as GPL-compatible.
7
+ http://opensource.org/licenses/isc-license.txt
8
+
9
+ Copyright (c) 2012, Noah Spurrier <noah@noah.org>
10
+ PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
11
+ PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
12
+ COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
13
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
+
21
+ """
22
+
23
+ import socket
24
+ from contextlib import contextmanager
25
+
26
+ from .exceptions import TIMEOUT, EOF
27
+ from .spawnbase import SpawnBase
28
+
29
+ __all__ = ["SocketSpawn"]
30
+
31
+
32
class SocketSpawn(SpawnBase):
    """This is like :mod:`pexpect.fdpexpect` but uses the cross-platform python socket api,
    rather than the unix-specific file descriptor api. Thus, it works with
    remote connections on both unix and windows."""

    def __init__(
        self,
        socket: socket.socket,  # NOTE: parameter shadows the socket module inside __init__
        args=None,
        timeout=30,
        maxread=2000,
        searchwindowsize=None,
        logfile=None,
        encoding=None,
        codec_errors="strict",
        use_poll=False,
    ):
        """This takes an open socket."""

        # No command/args: the connection was established by the caller.
        self.args = None
        self.command = None
        SpawnBase.__init__(
            self,
            timeout,
            maxread,
            searchwindowsize,
            logfile,
            encoding=encoding,
            codec_errors=codec_errors,
        )
        self.socket = socket
        self.child_fd = socket.fileno()
        self.closed = False
        self.name = "<socket %s>" % socket
        self.use_poll = use_poll

    def close(self):
        """Close the socket.

        Calling this method a second time does nothing, but if the file
        descriptor was closed elsewhere, :class:`OSError` will be raised.
        """
        # child_fd == -1 marks an already-closed spawn; make close idempotent.
        if self.child_fd == -1:
            return

        self.flush()
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()
        self.child_fd = -1
        self.closed = True

    def isalive(self):
        """ Alive if the fileno is valid """
        return self.socket.fileno() >= 0

    def send(self, s) -> int:
        """Write to socket, return number of bytes written"""
        s = self._coerce_send_string(s)
        self._log(s, "send")

        # Encode to bytes (or pass through when encoding=None), then send
        # everything; the return value counts encoded bytes, not characters.
        b = self._encoder.encode(s, final=False)
        self.socket.sendall(b)
        return len(b)

    def sendline(self, s) -> int:
        """Write to socket with trailing newline, return number of bytes written"""
        s = self._coerce_send_string(s)
        return self.send(s + self.linesep)

    def write(self, s):
        """Write to socket, return None"""
        self.send(s)

    def writelines(self, sequence):
        "Call self.write() for each item in sequence"
        for s in sequence:
            self.write(s)

    @contextmanager
    def _timeout(self, timeout):
        # Temporarily override the socket timeout, always restoring the
        # caller's previous setting even if the body raises.
        saved_timeout = self.socket.gettimeout()
        try:
            self.socket.settimeout(timeout)
            yield
        finally:
            self.socket.settimeout(saved_timeout)

    def read_nonblocking(self, size=1, timeout=-1):
        """
        Read from the file descriptor and return the result as a string.

        The read_nonblocking method of :class:`SpawnBase` assumes that a call
        to os.read will not block (timeout parameter is ignored). This is not
        the case for POSIX file-like objects such as sockets and serial ports.

        Use :func:`select.select`, timeout is implemented conditionally for
        POSIX systems.

        :param int size: Read at most *size* bytes.
        :param int timeout: Wait timeout seconds for file descriptor to be
            ready to read. When -1 (default), use self.timeout. When 0, poll.
        :return: String containing the bytes read
        """
        if timeout == -1:
            timeout = self.timeout
        try:
            with self._timeout(timeout):
                s = self.socket.recv(size)
                # recv() returning b'' means the peer closed the connection.
                if s == b'':
                    self.flag_eof = True
                    raise EOF("Socket closed")
                return s
        except socket.timeout:
            raise TIMEOUT("Timeout exceeded.")
evalkit_internvl/lib/python3.10/site-packages/pexpect/spawnbase.py ADDED
@@ -0,0 +1,536 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO, BytesIO
2
+ import codecs
3
+ import os
4
+ import sys
5
+ import re
6
+ import errno
7
+ from .exceptions import ExceptionPexpect, EOF, TIMEOUT
8
+ from .expect import Expecter, searcher_string, searcher_re
9
+
10
# True on Python 3; selects between the py2/py3 code paths below.
PY3 = (sys.version_info[0] >= 3)
# The unicode text type of this interpreter (str on py3, unicode on py2).
text_type = str if PY3 else unicode
12
+
13
+ class _NullCoder(object):
14
+ """Pass bytes through unchanged."""
15
+ @staticmethod
16
+ def encode(b, final=False):
17
+ return b
18
+
19
+ @staticmethod
20
+ def decode(b, final=False):
21
+ return b
22
+
23
class SpawnBase(object):
    """A base class providing the backwards-compatible spawn API for Pexpect.

    This should not be instantiated directly: use :class:`pexpect.spawn` or
    :class:`pexpect.fdpexpect.fdspawn`.
    """
    # Class-level defaults; overridden per-instance in __init__/subclasses.
    encoding = None
    pid = None
    flag_eof = False

    def __init__(self, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, encoding=None, codec_errors='strict'):
        # Standard streams kept as attributes so callers/tests can replace them.
        self.stdin = sys.stdin
        self.stdout = sys.stdout
        self.stderr = sys.stderr

        # Matching state populated by expect() and friends.
        self.searcher = None
        self.ignorecase = False
        self.before = None
        self.after = None
        self.match = None
        self.match_index = None
        # Child-process status fields.
        self.terminated = True
        self.exitstatus = None
        self.signalstatus = None
        # status returned by os.waitpid
        self.status = None
        # the child file descriptor is initially closed
        self.child_fd = -1
        self.timeout = timeout
        # EOF acts as the default end-of-stream delimiter for read()/readline().
        self.delimiter = EOF
        self.logfile = logfile
        # input from child (read_nonblocking)
        self.logfile_read = None
        # output to send (send, sendline)
        self.logfile_send = None
        # max bytes to read at one time into buffer
        self.maxread = maxread
        # Data before searchwindowsize point is preserved, but not searched.
        self.searchwindowsize = searchwindowsize
        # Delay used before sending data to child. Time in seconds.
        # Set this to None to skip the time.sleep() call completely.
        self.delaybeforesend = 0.05
        # Used by close() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterclose = 0.1
        # Used by terminate() to give kernel time to update process status.
        # Time in seconds.
        self.delayafterterminate = 0.1
        # Delay in seconds to sleep after each call to read_nonblocking().
        # Set this to None to skip the time.sleep() call completely: that
        # would restore the behavior from pexpect-2.0 (for performance
        # reasons or because you don't want to release Python's global
        # interpreter lock).
        self.delayafterread = 0.0001
        self.softspace = False
        self.name = '<' + repr(self) + '>'
        self.closed = True

        # Unicode interface: pick the bytes-mode or unicode-mode machinery.
        self.encoding = encoding
        self.codec_errors = codec_errors
        if encoding is None:
            # bytes mode (accepts some unicode for backwards compatibility)
            self._encoder = self._decoder = _NullCoder()
            self.string_type = bytes
            self.buffer_type = BytesIO
            self.crlf = b'\r\n'
            if PY3:
                self.allowed_string_types = (bytes, str)
                self.linesep = os.linesep.encode('ascii')
                def write_to_stdout(b):
                    try:
                        return sys.stdout.buffer.write(b)
                    except AttributeError:
                        # If stdout has been replaced, it may not have .buffer
                        return sys.stdout.write(b.decode('ascii', 'replace'))
                self.write_to_stdout = write_to_stdout
            else:
                self.allowed_string_types = (basestring,)  # analysis:ignore
                self.linesep = os.linesep
                self.write_to_stdout = sys.stdout.write
        else:
            # unicode mode: incremental codecs translate at the I/O boundary
            self._encoder = codecs.getincrementalencoder(encoding)(codec_errors)
            self._decoder = codecs.getincrementaldecoder(encoding)(codec_errors)
            self.string_type = text_type
            self.buffer_type = StringIO
            self.crlf = u'\r\n'
            self.allowed_string_types = (text_type, )
            if PY3:
                self.linesep = os.linesep
            else:
                self.linesep = os.linesep.decode('ascii')
            # This can handle unicode in both Python 2 and 3
            self.write_to_stdout = sys.stdout.write
        # storage for async transport
        self.async_pw_transport = None
        # This is the read buffer. See maxread.
        self._buffer = self.buffer_type()
        # The buffer may be trimmed for efficiency reasons. This is the
        # untrimmed buffer, used to create the before attribute.
        self._before = self.buffer_type()
126
+
127
+ def _log(self, s, direction):
128
+ if self.logfile is not None:
129
+ self.logfile.write(s)
130
+ self.logfile.flush()
131
+ second_log = self.logfile_send if (direction=='send') else self.logfile_read
132
+ if second_log is not None:
133
+ second_log.write(s)
134
+ second_log.flush()
135
+
136
+ # For backwards compatibility, in bytes mode (when encoding is None)
137
+ # unicode is accepted for send and expect. Unicode mode is strictly unicode
138
+ # only.
139
+ def _coerce_expect_string(self, s):
140
+ if self.encoding is None and not isinstance(s, bytes):
141
+ return s.encode('ascii')
142
+ return s
143
+
144
+ # In bytes mode, regex patterns should also be of bytes type
145
+ def _coerce_expect_re(self, r):
146
+ p = r.pattern
147
+ if self.encoding is None and not isinstance(p, bytes):
148
+ return re.compile(p.encode('utf-8'))
149
+ # And vice-versa
150
+ elif self.encoding is not None and isinstance(p, bytes):
151
+ return re.compile(p.decode('utf-8'))
152
+ return r
153
+
154
+ def _coerce_send_string(self, s):
155
+ if self.encoding is None and not isinstance(s, bytes):
156
+ return s.encode('utf-8')
157
+ return s
158
+
159
    def _get_buffer(self):
        # Return the current contents of the incoming-data buffer.
        return self._buffer.getvalue()

    def _set_buffer(self, value):
        # Replace the buffer contents wholesale (used when trimming the
        # search window); a fresh BytesIO/StringIO is created each time.
        self._buffer = self.buffer_type()
        self._buffer.write(value)

    # This property is provided for backwards compatibility (self.buffer used
    # to be a string/bytes object)
    buffer = property(_get_buffer, _set_buffer)
169
+
170
    def read_nonblocking(self, size=1, timeout=None):
        """This reads data from the file descriptor.

        This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.

        The timeout parameter is ignored.
        """

        try:
            s = os.read(self.child_fd, size)
        except OSError as err:
            if err.args[0] == errno.EIO:
                # Linux-style EOF
                self.flag_eof = True
                raise EOF('End Of File (EOF). Exception style platform.')
            raise
        if s == b'':
            # BSD-style EOF
            self.flag_eof = True
            raise EOF('End Of File (EOF). Empty string style platform.')

        # Incrementally decode in unicode mode (no-op in bytes mode),
        # then mirror the data to any configured log files.
        s = self._decoder.decode(s, final=False)
        self._log(s, 'read')
        return s
194
+
195
+ def _pattern_type_err(self, pattern):
196
+ raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
197
+ ' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
198
+ .format(badtype=type(pattern),
199
+ badobj=pattern,
200
+ goodtypes=', '.join([str(ast)\
201
+ for ast in self.allowed_string_types])
202
+ )
203
+ )
204
+
205
    def compile_pattern_list(self, patterns):
        '''This compiles a pattern-string or a list of pattern-strings.
        Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
        those. Patterns may also be None which results in an empty list (you
        might do this if waiting for an EOF or TIMEOUT condition without
        expecting any pattern).

        This is used by expect() when calling expect_list(). Thus expect() is
        nothing more than::

            cpl = self.compile_pattern_list(pl)
            return self.expect_list(cpl, timeout)

        If you are using expect() within a loop it may be more
        efficient to compile the patterns first and then call expect_list().
        This avoid calls in a loop to compile_pattern_list()::

            cpl = self.compile_pattern_list(my_pattern)
            while some_condition:
               ...
               i = self.expect_list(cpl, timeout)
               ...
        '''

        if patterns is None:
            return []
        if not isinstance(patterns, list):
            patterns = [patterns]

        # Allow dot to match \n
        compile_flags = re.DOTALL
        if self.ignorecase:
            compile_flags = compile_flags | re.IGNORECASE
        compiled_pattern_list = []
        for idx, p in enumerate(patterns):
            if isinstance(p, self.allowed_string_types):
                # Plain strings: coerce to the session's string type and
                # compile as regexes.
                p = self._coerce_expect_string(p)
                compiled_pattern_list.append(re.compile(p, compile_flags))
            elif p is EOF:
                compiled_pattern_list.append(EOF)
            elif p is TIMEOUT:
                compiled_pattern_list.append(TIMEOUT)
            elif isinstance(p, type(re.compile(''))):
                # Already-compiled regexes: only coerce bytes/str type.
                p = self._coerce_expect_re(p)
                compiled_pattern_list.append(p)
            else:
                self._pattern_type_err(p)
        return compiled_pattern_list
253
+
254
+ def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw):
255
+ '''This seeks through the stream until a pattern is matched. The
256
+ pattern is overloaded and may take several types. The pattern can be a
257
+ StringType, EOF, a compiled re, or a list of any of those types.
258
+ Strings will be compiled to re types. This returns the index into the
259
+ pattern list. If the pattern was not a list this returns index 0 on a
260
+ successful match. This may raise exceptions for EOF or TIMEOUT. To
261
+ avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
262
+ list. That will cause expect to match an EOF or TIMEOUT condition
263
+ instead of raising an exception.
264
+
265
+ If you pass a list of patterns and more than one matches, the first
266
+ match in the stream is chosen. If more than one pattern matches at that
267
+ point, the leftmost in the pattern list is chosen. For example::
268
+
269
+ # the input is 'foobar'
270
+ index = p.expect(['bar', 'foo', 'foobar'])
271
+ # returns 1('foo') even though 'foobar' is a "better" match
272
+
273
+ Please note, however, that buffering can affect this behavior, since
274
+ input arrives in unpredictable chunks. For example::
275
+
276
+ # the input is 'foobar'
277
+ index = p.expect(['foobar', 'foo'])
278
+ # returns 0('foobar') if all input is available at once,
279
+ # but returns 1('foo') if parts of the final 'bar' arrive late
280
+
281
+ When a match is found for the given pattern, the class instance
282
+ attribute *match* becomes an re.MatchObject result. Should an EOF
283
+ or TIMEOUT pattern match, then the match attribute will be an instance
284
+ of that exception class. The pairing before and after class
285
+ instance attributes are views of the data preceding and following
286
+ the matching pattern. On general exception, class attribute
287
+ *before* is all data received up to the exception, while *match* and
288
+ *after* attributes are value None.
289
+
290
+ When the keyword argument timeout is -1 (default), then TIMEOUT will
291
+ raise after the default value specified by the class timeout
292
+ attribute. When None, TIMEOUT will not be raised and may block
293
+ indefinitely until match.
294
+
295
+ When the keyword argument searchwindowsize is -1 (default), then the
296
+ value specified by the class maxread attribute is used.
297
+
298
+ A list entry may be EOF or TIMEOUT instead of a string. This will
299
+ catch these exceptions and return the index of the list entry instead
300
+ of raising the exception. The attribute 'after' will be set to the
301
+ exception type. The attribute 'match' will be None. This allows you to
302
+ write code like this::
303
+
304
+ index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
305
+ if index == 0:
306
+ do_something()
307
+ elif index == 1:
308
+ do_something_else()
309
+ elif index == 2:
310
+ do_some_other_thing()
311
+ elif index == 3:
312
+ do_something_completely_different()
313
+
314
+ instead of code like this::
315
+
316
+ try:
317
+ index = p.expect(['good', 'bad'])
318
+ if index == 0:
319
+ do_something()
320
+ elif index == 1:
321
+ do_something_else()
322
+ except EOF:
323
+ do_some_other_thing()
324
+ except TIMEOUT:
325
+ do_something_completely_different()
326
+
327
+ These two forms are equivalent. It all depends on what you want. You
328
+ can also just expect the EOF if you are waiting for all output of a
329
+ child to finish. For example::
330
+
331
+ p = pexpect.spawn('/bin/ls')
332
+ p.expect(pexpect.EOF)
333
+ print p.before
334
+
335
+ If you are trying to optimize for speed then see expect_list().
336
+
337
+ On Python 3.4, or Python 3.3 with asyncio installed, passing
338
+ ``async_=True`` will make this return an :mod:`asyncio` coroutine,
339
+ which you can yield from to get the same result that this method would
340
+ normally give directly. So, inside a coroutine, you can replace this code::
341
+
342
+ index = p.expect(patterns)
343
+
344
+ With this non-blocking form::
345
+
346
+ index = yield from p.expect(patterns, async_=True)
347
+ '''
348
+ if 'async' in kw:
349
+ async_ = kw.pop('async')
350
+ if kw:
351
+ raise TypeError("Unknown keyword arguments: {}".format(kw))
352
+
353
+ compiled_pattern_list = self.compile_pattern_list(pattern)
354
+ return self.expect_list(compiled_pattern_list,
355
+ timeout, searchwindowsize, async_)
356
+
357
+ def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1,
358
+ async_=False, **kw):
359
+ '''This takes a list of compiled regular expressions and returns the
360
+ index into the pattern_list that matched the child output. The list may
361
+ also contain EOF or TIMEOUT(which are not compiled regular
362
+ expressions). This method is similar to the expect() method except that
363
+ expect_list() does not recompile the pattern list on every call. This
364
+ may help if you are trying to optimize for speed, otherwise just use
365
+ the expect() method. This is called by expect().
366
+
367
+
368
+ Like :meth:`expect`, passing ``async_=True`` will make this return an
369
+ asyncio coroutine.
370
+ '''
371
+ if timeout == -1:
372
+ timeout = self.timeout
373
+ if 'async' in kw:
374
+ async_ = kw.pop('async')
375
+ if kw:
376
+ raise TypeError("Unknown keyword arguments: {}".format(kw))
377
+
378
+ exp = Expecter(self, searcher_re(pattern_list), searchwindowsize)
379
+ if async_:
380
+ from ._async import expect_async
381
+ return expect_async(exp, timeout)
382
+ else:
383
+ return exp.expect_loop(timeout)
384
+
385
    def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1,
                     async_=False, **kw):

        '''This is similar to expect(), but uses plain string matching instead
        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
        may be a string; a list or other sequence of strings; or TIMEOUT and
        EOF.

        This call might be faster than expect() for two reasons: string
        searching is faster than RE matching and it is possible to limit the
        search to just the end of the input buffer.

        This method is also useful when you don't want to have to worry about
        escaping regular expression characters that you want to match.

        Like :meth:`expect`, passing ``async_=True`` will make this return an
        asyncio coroutine.
        '''
        if timeout == -1:
            timeout = self.timeout
        # 'async' was a valid keyword before it became reserved; keep
        # accepting it for backwards compatibility.
        if 'async' in kw:
            async_ = kw.pop('async')
        if kw:
            raise TypeError("Unknown keyword arguments: {}".format(kw))

        # Promote a bare string (or bare EOF/TIMEOUT) to a one-element list.
        if (isinstance(pattern_list, self.allowed_string_types) or
                pattern_list in (TIMEOUT, EOF)):
            pattern_list = [pattern_list]

        def prepare_pattern(pattern):
            # EOF/TIMEOUT pass through unchanged; strings are coerced to the
            # session's string type; anything else is a type error.
            if pattern in (TIMEOUT, EOF):
                return pattern
            if isinstance(pattern, self.allowed_string_types):
                return self._coerce_expect_string(pattern)
            self._pattern_type_err(pattern)

        try:
            pattern_list = iter(pattern_list)
        except TypeError:
            self._pattern_type_err(pattern_list)
        pattern_list = [prepare_pattern(p) for p in pattern_list]

        exp = Expecter(self, searcher_string(pattern_list), searchwindowsize)
        if async_:
            from ._async import expect_async
            return expect_async(exp, timeout)
        else:
            return exp.expect_loop(timeout)
433
+
434
+ def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
435
+ '''This is the common loop used inside expect. The 'searcher' should be
436
+ an instance of searcher_re or searcher_string, which describes how and
437
+ what to search for in the input.
438
+
439
+ See expect() for other arguments, return value and exceptions. '''
440
+
441
+ exp = Expecter(self, searcher, searchwindowsize)
442
+ return exp.expect_loop(timeout)
443
+
444
+ def read(self, size=-1):
445
+ '''This reads at most "size" bytes from the file (less if the read hits
446
+ EOF before obtaining size bytes). If the size argument is negative or
447
+ omitted, read all data until EOF is reached. The bytes are returned as
448
+ a string object. An empty string is returned when EOF is encountered
449
+ immediately. '''
450
+
451
+ if size == 0:
452
+ return self.string_type()
453
+ if size < 0:
454
+ # delimiter default is EOF
455
+ self.expect(self.delimiter)
456
+ return self.before
457
+
458
+ # I could have done this more directly by not using expect(), but
459
+ # I deliberately decided to couple read() to expect() so that
460
+ # I would catch any bugs early and ensure consistent behavior.
461
+ # It's a little less efficient, but there is less for me to
462
+ # worry about if I have to later modify read() or expect().
463
+ # Note, it's OK if size==-1 in the regex. That just means it
464
+ # will never match anything in which case we stop only on EOF.
465
+ cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
466
+ # delimiter default is EOF
467
+ index = self.expect([cre, self.delimiter])
468
+ if index == 0:
469
+ ### FIXME self.before should be ''. Should I assert this?
470
+ return self.after
471
+ return self.before
472
+
473
+ def readline(self, size=-1):
474
+ '''This reads and returns one entire line. The newline at the end of
475
+ line is returned as part of the string, unless the file ends without a
476
+ newline. An empty string is returned if EOF is encountered immediately.
477
+ This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
478
+ this is what the pseudotty device returns. So contrary to what you may
479
+ expect you will receive newlines as \\r\\n.
480
+
481
+ If the size argument is 0 then an empty string is returned. In all
482
+ other cases the size argument is ignored, which is not standard
483
+ behavior for a file-like object. '''
484
+
485
+ if size == 0:
486
+ return self.string_type()
487
+ # delimiter default is EOF
488
+ index = self.expect([self.crlf, self.delimiter])
489
+ if index == 0:
490
+ return self.before + self.crlf
491
+ else:
492
+ return self.before
493
+
494
+ def __iter__(self):
495
+ '''This is to support iterators over a file-like object.
496
+ '''
497
+ return iter(self.readline, self.string_type())
498
+
499
+ def readlines(self, sizehint=-1):
500
+ '''This reads until EOF using readline() and returns a list containing
501
+ the lines thus read. The optional 'sizehint' argument is ignored.
502
+ Remember, because this reads until EOF that means the child
503
+ process should have closed its stdout. If you run this method on
504
+ a child that is still running with its stdout open then this
505
+ method will block until it timesout.'''
506
+
507
+ lines = []
508
+ while True:
509
+ line = self.readline()
510
+ if not line:
511
+ break
512
+ lines.append(line)
513
+ return lines
514
+
515
+ def fileno(self):
516
+ '''Expose file descriptor for a file-like interface
517
+ '''
518
+ return self.child_fd
519
+
520
+ def flush(self):
521
+ '''This does nothing. It is here to support the interface for a
522
+ File-like object. '''
523
+ pass
524
+
525
+ def isatty(self):
526
+ """Overridden in subclass using tty"""
527
+ return False
528
+
529
+ # For 'with spawn(...) as child:'
530
+ def __enter__(self):
531
+ return self
532
+
533
+ def __exit__(self, etype, evalue, tb):
534
+ # We rely on subclasses to implement close(). If they don't, it's not
535
+ # clear what a context manager should do.
536
+ self.close()
evalkit_internvl/lib/python3.10/site-packages/pexpect/utils.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import stat
4
+ import select
5
+ import time
6
+ import errno
7
+
8
# Python 2 has no InterruptedError builtin; select.error fills the same
# role there (it is what select.select raises on EINTR).
try:
    InterruptedError
except NameError:
    # Alias Python2 exception to Python3
    InterruptedError = select.error

# Types accepted where a string is expected (py2 also has unicode).
if sys.version_info[0] >= 3:
    string_types = (str,)
else:
    string_types = (unicode, str)
18
+
19
+
20
def is_executable_file(path):
    """Checks that path is an executable regular file, or a symlink towards one.

    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
    """
    # follow symlinks,
    fpath = os.path.realpath(path)

    if not os.path.isfile(fpath):
        # non-files (directories, fifo, etc.)
        return False

    mode = os.stat(fpath).st_mode

    if (sys.platform.startswith('sunos')
            and os.getuid() == 0):
        # When root on Solaris, os.X_OK is True for *all* files, regardless
        # of their executability -- instead, any permission bit of any user,
        # group, or other is fine enough.
        #
        # (This may be true for other "Unix98" OS's such as HP-UX and AIX)
        return bool(mode & (stat.S_IXUSR |
                            stat.S_IXGRP |
                            stat.S_IXOTH))

    return os.access(fpath, os.X_OK)
46
+
47
+
48
def which(filename, env=None):
    '''Search the PATH (taken from *env*, defaulting to os.environ) for
    *filename* and return the full path of the first executable match, or
    None if nothing suitable is found.'''
    # An explicit directory component bypasses the PATH search entirely.
    if os.path.dirname(filename) != '' and is_executable_file(filename):
        return filename
    if env is None:
        env = os.environ
    search_path = env.get('PATH') or os.defpath
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if is_executable_file(candidate):
            return candidate
    return None
67
+
68
+
69
+ def split_command_line(command_line):
70
+
71
+ '''This splits a command line into a list of arguments. It splits arguments
72
+ on spaces, but handles embedded quotes, doublequotes, and escaped
73
+ characters. It's impossible to do this with a regular expression, so I
74
+ wrote a little state machine to parse the command line. '''
75
+
76
+ arg_list = []
77
+ arg = ''
78
+
79
+ # Constants to name the states we can be in.
80
+ state_basic = 0
81
+ state_esc = 1
82
+ state_singlequote = 2
83
+ state_doublequote = 3
84
+ # The state when consuming whitespace between commands.
85
+ state_whitespace = 4
86
+ state = state_basic
87
+
88
+ for c in command_line:
89
+ if state == state_basic or state == state_whitespace:
90
+ if c == '\\':
91
+ # Escape the next character
92
+ state = state_esc
93
+ elif c == r"'":
94
+ # Handle single quote
95
+ state = state_singlequote
96
+ elif c == r'"':
97
+ # Handle double quote
98
+ state = state_doublequote
99
+ elif c.isspace():
100
+ # Add arg to arg_list if we aren't in the middle of whitespace.
101
+ if state == state_whitespace:
102
+ # Do nothing.
103
+ None
104
+ else:
105
+ arg_list.append(arg)
106
+ arg = ''
107
+ state = state_whitespace
108
+ else:
109
+ arg = arg + c
110
+ state = state_basic
111
+ elif state == state_esc:
112
+ arg = arg + c
113
+ state = state_basic
114
+ elif state == state_singlequote:
115
+ if c == r"'":
116
+ state = state_basic
117
+ else:
118
+ arg = arg + c
119
+ elif state == state_doublequote:
120
+ if c == r'"':
121
+ state = state_basic
122
+ else:
123
+ arg = arg + c
124
+
125
+ if arg != '':
126
+ arg_list.append(arg)
127
+ return arg_list
128
+
129
+
130
def select_ignore_interrupts(iwtd, owtd, ewtd, timeout=None):
    '''Wrapper around select.select() that ignores signals.

    If select.select raises and errno is EINTR, the select is retried
    (with the remaining timeout); mainly used to ignore sigwinch
    (terminal resize).  Returns the usual (rlist, wlist, xlist) triple.'''

    # Remember the deadline so retries wait only for the remaining time.
    if timeout is not None:
        end_time = time.time() + timeout
    while True:
        try:
            return select.select(iwtd, owtd, ewtd, timeout)
        except InterruptedError as err:
            if err.args[0] != errno.EINTR:
                # something else caused the select.error: a real exception.
                raise
            if timeout is not None:
                timeout = end_time - time.time()
                if timeout < 0:
                    # deadline passed while handling the signal
                    return ([], [], [])
157
+
158
+
159
def poll_ignore_interrupts(fds, timeout=None):
    '''Wrapper around select.poll() that registers *fds* for readable/error
    events and retries on EINTR, returning the list of ready descriptors.'''

    # Remember the deadline so retries wait only for the remaining time.
    if timeout is not None:
        end_time = time.time() + timeout

    poller = select.poll()
    events = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
    for fd in fds:
        poller.register(fd, events)

    while True:
        try:
            # poll() takes milliseconds; None means block indefinitely.
            ms = None if timeout is None else timeout * 1000
            return [ready_fd for ready_fd, _flags in poller.poll(ms)]
        except InterruptedError as err:
            if err.args[0] != errno.EINTR:
                # something else caused the select.error: a real exception.
                raise
            if timeout is not None:
                timeout = end_time - time.time()
                if timeout < 0:
                    # deadline passed while handling the signal
                    return []
evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2db86a15808f09d39c7522e3a99012d3d7542da9be29c699a86333c58226752
3
+ size 334858