ZTWHHH committed on
Commit
2523da7
·
verified ·
1 Parent(s): 0f07336

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/httpx-0.24.0.dist-info/WHEEL +4 -0
  2. parrot/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/matplotlib-3.9.2.dist-info/LICENSE +99 -0
  7. parrot/lib/python3.10/site-packages/mistral_inference/__init__.py +1 -0
  8. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/__init__.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/args.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/cache.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/generate.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/lora.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/main.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/mamba.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/model.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/moe.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/rope.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer_layers.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/vision_encoder.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/mistral_inference/args.py +64 -0
  22. parrot/lib/python3.10/site-packages/mistral_inference/cache.py +201 -0
  23. parrot/lib/python3.10/site-packages/mistral_inference/generate.py +169 -0
  24. parrot/lib/python3.10/site-packages/mistral_inference/lora.py +155 -0
  25. parrot/lib/python3.10/site-packages/mistral_inference/main.py +273 -0
  26. parrot/lib/python3.10/site-packages/mistral_inference/mamba.py +83 -0
  27. parrot/lib/python3.10/site-packages/mistral_inference/model.py +43 -0
  28. parrot/lib/python3.10/site-packages/mistral_inference/moe.py +32 -0
  29. parrot/lib/python3.10/site-packages/mistral_inference/rope.py +51 -0
  30. parrot/lib/python3.10/site-packages/mistral_inference/transformer.py +292 -0
  31. parrot/lib/python3.10/site-packages/mistral_inference/transformer_layers.py +169 -0
  32. parrot/lib/python3.10/site-packages/mistral_inference/vision_encoder.py +146 -0
  33. parrot/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 +0 -0
  34. parrot/lib/python3.10/site-packages/pillow.libs/libbrotlidec-ba690955.so.1 +0 -0
  35. parrot/lib/python3.10/site-packages/pillow.libs/libsharpyuv-898c0cb5.so.0.1.0 +0 -0
  36. parrot/lib/python3.10/site-packages/pillow.libs/libwebpdemux-f2642bcc.so.2.0.15 +0 -0
  37. parrot/lib/python3.10/site-packages/pillow.libs/libwebpmux-d524b4d5.so.3.1.0 +0 -0
  38. parrot/lib/python3.10/site-packages/scripts/__pycache__/test_imports.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/scripts/__pycache__/test_leak.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/scripts/run_emscripten_tests.py +343 -0
  41. parrot/lib/python3.10/site-packages/scripts/test_leak.py +110 -0
  42. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/PKG-INFO +142 -0
  43. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/SOURCES.txt +571 -0
  44. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/dependency_links.txt +1 -0
  45. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/entry_points.txt +51 -0
  46. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt +85 -0
  47. parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt +3 -0
  48. parrot/lib/python3.10/site-packages/shellingham/__init__.py +23 -0
  49. parrot/lib/python3.10/site-packages/shellingham/__pycache__/__init__.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/shellingham/__pycache__/_core.cpython-310.pyc +0 -0
parrot/lib/python3.10/site-packages/httpx-0.24.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.14.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
parrot/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (820 Bytes). View file
 
parrot/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc ADDED
Binary file (723 Bytes). View file
 
parrot/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc ADDED
Binary file (9.64 kB). View file
 
parrot/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
parrot/lib/python3.10/site-packages/matplotlib-3.9.2.dist-info/LICENSE ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ License agreement for matplotlib versions 1.3.0 and later
2
+ =========================================================
3
+
4
+ 1. This LICENSE AGREEMENT is between the Matplotlib Development Team
5
+ ("MDT"), and the Individual or Organization ("Licensee") accessing and
6
+ otherwise using matplotlib software in source or binary form and its
7
+ associated documentation.
8
+
9
+ 2. Subject to the terms and conditions of this License Agreement, MDT
10
+ hereby grants Licensee a nonexclusive, royalty-free, world-wide license
11
+ to reproduce, analyze, test, perform and/or display publicly, prepare
12
+ derivative works, distribute, and otherwise use matplotlib
13
+ alone or in any derivative version, provided, however, that MDT's
14
+ License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
15
+ 2012- Matplotlib Development Team; All Rights Reserved" are retained in
16
+ matplotlib alone or in any derivative version prepared by
17
+ Licensee.
18
+
19
+ 3. In the event Licensee prepares a derivative work that is based on or
20
+ incorporates matplotlib or any part thereof, and wants to
21
+ make the derivative work available to others as provided herein, then
22
+ Licensee hereby agrees to include in any such work a brief summary of
23
+ the changes made to matplotlib .
24
+
25
+ 4. MDT is making matplotlib available to Licensee on an "AS
26
+ IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
27
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
28
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
29
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
30
+ WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
31
+
32
+ 5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
33
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
34
+ LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
35
+ MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
36
+ THE POSSIBILITY THEREOF.
37
+
38
+ 6. This License Agreement will automatically terminate upon a material
39
+ breach of its terms and conditions.
40
+
41
+ 7. Nothing in this License Agreement shall be deemed to create any
42
+ relationship of agency, partnership, or joint venture between MDT and
43
+ Licensee. This License Agreement does not grant permission to use MDT
44
+ trademarks or trade name in a trademark sense to endorse or promote
45
+ products or services of Licensee, or any third party.
46
+
47
+ 8. By copying, installing or otherwise using matplotlib ,
48
+ Licensee agrees to be bound by the terms and conditions of this License
49
+ Agreement.
50
+
51
+ License agreement for matplotlib versions prior to 1.3.0
52
+ ========================================================
53
+
54
+ 1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
55
+ Individual or Organization ("Licensee") accessing and otherwise using
56
+ matplotlib software in source or binary form and its associated
57
+ documentation.
58
+
59
+ 2. Subject to the terms and conditions of this License Agreement, JDH
60
+ hereby grants Licensee a nonexclusive, royalty-free, world-wide license
61
+ to reproduce, analyze, test, perform and/or display publicly, prepare
62
+ derivative works, distribute, and otherwise use matplotlib
63
+ alone or in any derivative version, provided, however, that JDH's
64
+ License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
65
+ 2002-2011 John D. Hunter; All Rights Reserved" are retained in
66
+ matplotlib alone or in any derivative version prepared by
67
+ Licensee.
68
+
69
+ 3. In the event Licensee prepares a derivative work that is based on or
70
+ incorporates matplotlib or any part thereof, and wants to
71
+ make the derivative work available to others as provided herein, then
72
+ Licensee hereby agrees to include in any such work a brief summary of
73
+ the changes made to matplotlib.
74
+
75
+ 4. JDH is making matplotlib available to Licensee on an "AS
76
+ IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
77
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
78
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
79
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
80
+ WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
81
+
82
+ 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
83
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
84
+ LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
85
+ MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
86
+ THE POSSIBILITY THEREOF.
87
+
88
+ 6. This License Agreement will automatically terminate upon a material
89
+ breach of its terms and conditions.
90
+
91
+ 7. Nothing in this License Agreement shall be deemed to create any
92
+ relationship of agency, partnership, or joint venture between JDH and
93
+ Licensee. This License Agreement does not grant permission to use JDH
94
+ trademarks or trade name in a trademark sense to endorse or promote
95
+ products or services of Licensee, or any third party.
96
+
97
+ 8. By copying, installing or otherwise using matplotlib,
98
+ Licensee agrees to be bound by the terms and conditions of this License
99
+ Agreement.
parrot/lib/python3.10/site-packages/mistral_inference/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
# Package version of mistral_inference (exposed as mistral_inference.__version__).
__version__ = "1.4.0"
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/args.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/cache.cpython-310.pyc ADDED
Binary file (7.55 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/generate.cpython-310.pyc ADDED
Binary file (5.21 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/lora.cpython-310.pyc ADDED
Binary file (5.38 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/main.cpython-310.pyc ADDED
Binary file (7.64 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/mamba.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/model.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/moe.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/rope.cpython-310.pyc ADDED
Binary file (1.74 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (8.56 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer_layers.cpython-310.pyc ADDED
Binary file (5.82 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/vision_encoder.cpython-310.pyc ADDED
Binary file (5.22 kB). View file
 
parrot/lib/python3.10/site-packages/mistral_inference/args.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Optional
3
+
4
+ from simple_parsing.helpers import Serializable
5
+
6
+ from mistral_inference.lora import LoraArgs
7
+ from mistral_inference.moe import MoeArgs
8
+
9
+
10
@dataclass
class VisionEncoderArgs:
    """Configuration of the vision encoder used by multimodal checkpoints.

    Fields without defaults must be supplied by the loaded model config.
    """

    hidden_size: int  # embedding width of the vision transformer
    num_channels: int  # input image channels (presumably 3 for RGB — confirm against checkpoints)
    image_size: int
    patch_size: int
    intermediate_size: int  # feed-forward hidden width
    num_hidden_layers: int
    num_attention_heads: int
    rope_theta: float = 1e4  # for rope-2D
    image_token_id: int = 10
21
+
22
+
23
@dataclass
class TransformerArgs(Serializable):
    """Architecture hyper-parameters for a transformer model (dense or MoE)."""

    dim: int
    n_layers: int
    head_dim: int
    hidden_dim: int
    n_heads: int
    n_kv_heads: int  # fewer KV heads than query heads enables grouped-query attention
    norm_eps: float
    vocab_size: int

    max_batch_size: int = 0

    # For rotary embeddings. If not set, will be inferred
    rope_theta: Optional[float] = None
    # If this is set, we will use MoE layers instead of dense layers.
    moe: Optional[MoeArgs] = None
    # If this is set, we will load LoRA linear layers instead of linear layers.
    lora: Optional[LoraArgs] = None
    model_type: str = "transformer"

    # Present only for multimodal checkpoints.
    vision_encoder: Optional[VisionEncoderArgs] = None

    def __post_init__(self) -> None:
        # Guards against deserializing a config of a different model family.
        assert self.model_type == "transformer", self.model_type
48
+
49
+
50
@dataclass
class MambaArgs(Serializable):
    """Architecture hyper-parameters for a Mamba (state-space) model."""

    dim: int
    n_layers: int
    vocab_size: int
    n_groups: int
    rms_norm: bool
    residual_in_fp32: bool
    fused_add_norm: bool
    pad_vocab_size_multiple: int
    tie_embeddings: bool  # share input embedding and output projection weights
    model_type: str = "mamba"

    def __post_init__(self) -> None:
        # Guards against deserializing a config of a different model family.
        assert self.model_type == "mamba", self.model_type
parrot/lib/python3.10/site-packages/mistral_inference/cache.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import List, Optional, Tuple
3
+
4
+ import torch
5
+ from xformers.ops.fmha.attn_bias import ( # type: ignore
6
+ AttentionBias,
7
+ BlockDiagonalCausalMask,
8
+ BlockDiagonalCausalWithOffsetPaddedKeysMask,
9
+ BlockDiagonalMask,
10
+ )
11
+
12
+
13
@dataclass
class CacheInputMetadata:
    """Precomputed indexing and attention-mask info for one forward pass.

    Produced by ``BufferCache.get_input_metadata`` and consumed by
    ``CacheView.update`` / the attention layers.
    """

    # rope absolute positions
    positions: torch.Tensor
    # where tokens should go in the cache
    cache_positions: torch.Tensor

    # if prefill, use block diagonal causal mask
    # else use causal with padded key mask
    prefill: bool
    mask: AttentionBias
    seqlens: List[int]
25
+
26
+
27
def interleave_list(l1: List[torch.Tensor], l2: List[torch.Tensor]) -> List[torch.Tensor]:
    """Merge two equal-length lists element-wise: [a0, b0, a1, b1, ...]."""
    assert len(l1) == len(l2)
    merged: List[torch.Tensor] = []
    for first, second in zip(l1, l2):
        merged.append(first)
        merged.append(second)
    return merged
30
+
31
+
32
class CacheView:
    """One layer's view into the BufferCache: that layer's K/V buffers plus
    the per-forward metadata needed to read and update them."""

    def __init__(
        self,
        cache_k: torch.Tensor,
        cache_v: torch.Tensor,
        metadata: CacheInputMetadata,
        kv_seqlens: torch.Tensor,
    ):
        self.cache_k = cache_k
        self.cache_v = cache_v
        self.kv_seqlens = kv_seqlens
        self.metadata = metadata

    def update(self, xk: torch.Tensor, xv: torch.Tensor) -> None:
        """
        to_cache_mask masks the last [max_seq_len] tokens in each sequence
        """
        n_kv_heads, head_dim = self.cache_k.shape[-2:]
        # The flat views share storage with the cache, so index_copy_ writes
        # straight into the underlying buffers at metadata.cache_positions.
        flat_cache_k = self.cache_k.view(-1, n_kv_heads, head_dim)
        flat_cache_v = self.cache_v.view(-1, n_kv_heads, head_dim)

        flat_cache_k.index_copy_(0, self.metadata.cache_positions, xk)
        flat_cache_v.index_copy_(0, self.metadata.cache_positions, xv)

    def interleave_kv(self, xk: torch.Tensor, xv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        This is a naive implementation and not optimized for speed.
        """
        assert xk.ndim == xv.ndim == 3  # (B * T, H, D)
        assert xk.shape == xv.shape

        if all([s == 0 for s in self.metadata.seqlens]):
            # No cache to interleave
            return xk, xv

        # Make it a list of [(T, H, D)]
        xk: Tuple[torch.Tensor] = torch.split(xk, self.metadata.seqlens)  # type: ignore
        xv: Tuple[torch.Tensor] = torch.split(xv, self.metadata.seqlens)  # type: ignore
        assert len(xk) == len(self.kv_seqlens), f"Batch size is {len(self.kv_seqlens)}, got {len(xk)}"

        # Retrieve cache
        cache_k = [cache_k[:seq_len] for cache_k, seq_len in zip(self.cache_k, self.kv_seqlens)]
        cache_v = [cache_v[:seq_len] for cache_v, seq_len in zip(self.cache_v, self.kv_seqlens)]

        # Alternate each batch element's cached tokens with its new tokens,
        # then flatten back into one (sum_len, H, D) tensor.
        interleaved_k = interleave_list(cache_k, list(xk))
        interleaved_v = interleave_list(cache_v, list(xv))

        return torch.cat(interleaved_k, dim=0), torch.cat(interleaved_v, dim=0)

    @property
    def max_seq_len(self) -> int:
        # Per-layer buffers are (batch, max_seq_len, n_kv_heads, head_dim).
        return self.cache_k.shape[1]

    @property
    def key(self) -> torch.Tensor:
        # Only the first len(kv_seqlens) batch slots hold valid data.
        return self.cache_k[: len(self.kv_seqlens)]

    @property
    def value(self) -> torch.Tensor:
        return self.cache_v[: len(self.kv_seqlens)]

    @property
    def prefill(self) -> bool:
        return self.metadata.prefill

    @property
    def mask(self) -> AttentionBias:
        return self.metadata.mask
100
+
101
+
102
class BufferCache:
    """
    This is an example that implements a buffer cache, allowing for variable length sequences.
    Allocated cache is rectangular which is wasteful (see PagedAttention for better mechanisms)
    """

    def __init__(
        self,
        n_layers: int,
        max_batch_size: int,
        max_seq_len: int,
        n_kv_heads: int,
        head_dim: int,
    ):
        self.max_seq_len = max_seq_len
        self.n_kv_heads = n_kv_heads
        self.head_dim = head_dim

        # One rectangular K and one V buffer covering every layer:
        # (n_layers, max_batch_size, max_seq_len, n_kv_heads, head_dim).
        self.cache_k = torch.empty((n_layers, max_batch_size, max_seq_len, n_kv_heads, head_dim))
        self.cache_v = torch.empty((n_layers, max_batch_size, max_seq_len, n_kv_heads, head_dim))
        # holds the valid length for each batch element in the cache
        self.kv_seqlens: Optional[torch.Tensor] = None

    def get_view(self, layer_id: int, metadata: CacheInputMetadata) -> CacheView:
        # Per-layer slice; requires kv_seqlens to have been initialized
        # (via get_input_metadata / init_kvseqlens) first.
        assert self.kv_seqlens is not None
        return CacheView(self.cache_k[layer_id], self.cache_v[layer_id], metadata, self.kv_seqlens)

    def reset(self) -> None:
        # Invalidate the cache; buffers are reused, only lengths are cleared.
        self.kv_seqlens = None

    def init_kvseqlens(self, batch_size: int) -> None:
        self.kv_seqlens = torch.zeros((batch_size,), device=self.device, dtype=torch.long)

    @property
    def device(self) -> torch.device:
        return self.cache_k.device

    def to(self, device: torch.device, dtype: torch.dtype) -> "BufferCache":
        self.cache_k = self.cache_k.to(device=device, dtype=dtype)
        self.cache_v = self.cache_v.to(device=device, dtype=dtype)

        return self

    def update_seqlens(self, seqlens: List[int]) -> None:
        # Advance each sequence's valid length by the tokens just written.
        assert self.kv_seqlens is not None
        self.kv_seqlens += torch.tensor(seqlens, device=self.device, dtype=torch.long)

    def get_input_metadata(self, seqlens: List[int]) -> CacheInputMetadata:
        """
        Get metadata about cache positions
        """
        if self.kv_seqlens is None:
            self.init_kvseqlens(len(seqlens))

        assert isinstance(self.kv_seqlens, torch.Tensor)
        assert len(seqlens) == len(
            self.kv_seqlens
        ), f"Batch size is {len(self.kv_seqlens)}, got {len(seqlens)}, did you forget to reset cache?"
        seqpos = self.kv_seqlens.tolist()

        assert len(seqlens) > 0, seqlens
        # NOTE(review): despite the name, this holds the NEW token counts per
        # sequence (it is added to kv_seqlens below), not previously cached ones.
        cached_elements = torch.tensor(seqlens, device=self.device, dtype=torch.long)

        # Absolute (rope) position of every incoming token within its sequence.
        positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)]).to(
            device=self.device, dtype=torch.long
        )

        batch_idx = torch.tensor(
            sum([[i] * seqlen for i, seqlen in enumerate(seqlens)], []),
            device=self.device,
            dtype=torch.long,
        )
        # Flat index into the (batch * max_seq_len) cache layout used by CacheView.update.
        cache_positions = positions + batch_idx * self.max_seq_len

        first_prefill = seqpos[0] == 0
        subsequent_prefill = any(seqlen > 1 for seqlen in seqlens)
        if first_prefill:
            # First pass: nothing cached yet for any sequence.
            assert all([pos == 0 for pos in seqpos]), seqpos
            mask = BlockDiagonalCausalMask.from_seqlens(seqlens).make_local_attention(self.max_seq_len)
        elif subsequent_prefill:
            # Later prompt chunks: queries attend to cached + new keys.
            mask = BlockDiagonalMask.from_seqlens(
                q_seqlen=seqlens,
                kv_seqlen=[
                    s + cached_s.clamp(max=self.max_seq_len).item() for (s, cached_s) in zip(seqlens, self.kv_seqlens)
                ],
            ).make_local_attention_from_bottomright(self.max_seq_len)
        else:
            # Single-token decoding step against the padded cache.
            mask = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
                q_seqlen=seqlens,
                kv_padding=self.max_seq_len,
                kv_seqlen=(self.kv_seqlens + cached_elements).clamp(max=self.max_seq_len).tolist(),
            )

        return CacheInputMetadata(
            positions=positions,
            cache_positions=cache_positions,
            prefill=first_prefill or subsequent_prefill,
            mask=mask,
            seqlens=seqlens,
        )
parrot/lib/python3.10/site-packages/mistral_inference/generate.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Tuple
2
+
3
+ import numpy as np
4
+ import torch
5
+
6
+ from mistral_inference.cache import BufferCache
7
+ from mistral_inference.mamba import Mamba
8
+ from mistral_inference.transformer import Transformer
9
+
10
+
11
@torch.inference_mode()
def generate_mamba(
    encoded_prompts: List[List[int]],
    model: Mamba,
    *,
    max_tokens: int,
    temperature: float,
    chunk_size: Optional[int] = None,  # accepted for signature parity; unused here
    eos_id: Optional[int] = None,
) -> Tuple[List[List[int]], List[List[float]]]:
    """Generate completions with a Mamba model via its wrapped generate API.

    Returns ``(generated_token_ids, scores)`` per prompt, where scores are the
    per-token values taken from ``output.scores``.
    NOTE(review): prompts are stacked into one rectangular tensor, so all
    prompts must have equal length — confirm against callers.
    """
    input_ids = torch.tensor(encoded_prompts, device=model.device)
    output = model.model.generate(
        input_ids=input_ids,
        max_length=input_ids.shape[-1] + max_tokens,
        cg=True,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=False,
        eos_token_id=eos_id,
        temperature=temperature,
        top_p=0.8,
    )
    # Keep only the newly generated suffix (drop the echoed prompt tokens).
    generated_tokens = output.sequences[:, input_ids.shape[-1] :].tolist()

    _logprobs: List[List[float]] = [[] for _ in range(len(generated_tokens))]
    # output.scores holds one tensor per generated step; index each step's
    # scores by the token that was actually emitted for that batch element.
    for seq_idx, batch_score in enumerate(output.scores):
        for batch_idx, score in enumerate(batch_score.tolist()):
            _logprobs[batch_idx].append(score[generated_tokens[batch_idx][seq_idx]])

    return generated_tokens, _logprobs
41
+
42
+
43
@torch.inference_mode()
def generate(
    encoded_prompts: List[List[int]],
    model: Transformer,
    images: List[List[np.ndarray]] = [],  # NOTE(review): mutable default — safe here, it is only read
    *,
    max_tokens: int,
    temperature: float,
    chunk_size: Optional[int] = None,
    eos_id: Optional[int] = None,
) -> Tuple[List[List[int]], List[List[float]]]:
    """Autoregressively generate completions with a Transformer model.

    Args:
        encoded_prompts: one list of token ids per batch element.
        model: the transformer to run (put into eval mode here).
        images: optional per-prompt image arrays for multimodal models.
        max_tokens: number of tokens to decode after the prompt.
        temperature: 0 selects greedy decoding, otherwise softmax temperature.
        chunk_size: prompt prefill chunk length; must be None when images are given.
        eos_id: stop a sequence once this token is produced.

    Returns:
        (generated token ids per prompt, logprobs). Logprobs also include the
        scores of prompt continuation tokens computed during prefill.
    """
    images_torch: List[List[torch.Tensor]] = []
    if images:
        assert chunk_size is None
        images_torch = [
            [torch.tensor(im, device=model.device, dtype=model.dtype) for im in images_for_sample]
            for images_for_sample in images
        ]

    model = model.eval()
    B, V = len(encoded_prompts), model.args.vocab_size

    seqlens = [len(x) for x in encoded_prompts]

    # Cache
    cache_window = max(seqlens) + max_tokens
    cache = BufferCache(
        model.n_local_layers,
        model.args.max_batch_size,
        cache_window,
        model.args.n_kv_heads,
        model.args.head_dim,
    )
    cache.to(device=model.device, dtype=model.dtype)
    cache.reset()

    # Bookkeeping
    logprobs: List[List[float]] = [[] for _ in range(B)]
    last_token_prelogits = None

    # One chunk if size not specified
    max_prompt_len = max(seqlens)
    if chunk_size is None:
        chunk_size = max_prompt_len

    flattened_images: List[torch.Tensor] = sum(images_torch, [])

    # Encode prompt by chunks
    for s in range(0, max_prompt_len, chunk_size):
        prompt_chunks = [p[s : s + chunk_size] for p in encoded_prompts]
        # NOTE(review): this assumes every prompt still has tokens left in each
        # chunk window — unequal prompt lengths with chunking would trip it.
        assert all(len(p) > 0 for p in prompt_chunks)
        prelogits = model.forward(
            torch.tensor(sum(prompt_chunks, []), device=model.device, dtype=torch.long),
            images=flattened_images,
            seqlens=[len(p) for p in prompt_chunks],
            cache=cache,
        )
        logits = torch.log_softmax(prelogits, dim=-1)

        if last_token_prelogits is not None:
            # Pass > 1: score each chunk's first token with the previous
            # chunk's final prediction.
            last_token_logits = torch.log_softmax(last_token_prelogits, dim=-1)
            for i_seq in range(B):
                logprobs[i_seq].append(last_token_logits[i_seq, prompt_chunks[i_seq][0]].item())

        # Score each prompt token against the model's prediction for it.
        offset = 0
        for i_seq, sequence in enumerate(prompt_chunks):
            logprobs[i_seq].extend([logits[offset + i, sequence[i + 1]].item() for i in range(len(sequence) - 1)])
            offset += len(sequence)

        # Keep only each sequence's final position for the next decode step.
        last_token_prelogits = prelogits.index_select(
            0,
            torch.tensor([len(p) for p in prompt_chunks], device=prelogits.device).cumsum(dim=0) - 1,
        )
        assert last_token_prelogits.shape == (B, V)

    # decode
    generated_tensors = []
    is_finished = torch.tensor([False for _ in range(B)])

    assert last_token_prelogits is not None
    for _ in range(max_tokens):
        next_token = sample(last_token_prelogits, temperature=temperature, top_p=0.8)

        if eos_id is not None:
            is_finished = is_finished | (next_token == eos_id).cpu()

        if is_finished.all():
            break

        last_token_logits = torch.log_softmax(last_token_prelogits, dim=-1)
        for i in range(B):
            logprobs[i].append(last_token_logits[i, next_token[i]].item())

        generated_tensors.append(next_token[:, None])
        last_token_prelogits = model.forward(next_token, seqlens=[1] * B, cache=cache)
        assert last_token_prelogits.shape == (B, V)

    generated_tokens: List[List[int]]
    if generated_tensors:
        generated_tokens = torch.cat(generated_tensors, 1).tolist()
    else:
        generated_tokens = []

    return generated_tokens, logprobs
148
+
149
+
150
def sample(logits: torch.Tensor, temperature: float, top_p: float) -> torch.Tensor:
    """Pick one token id per row of ``logits``.

    ``temperature <= 0`` selects greedy decoding (argmax); otherwise nucleus
    (top-p) sampling over the temperature-scaled softmax distribution.
    Returns a flat 1-D tensor of token ids.
    """
    if temperature <= 0:
        # Greedy path: deterministic, no distribution needed.
        return torch.argmax(logits, dim=-1).unsqueeze(0).reshape(-1)

    scaled_probs = torch.softmax(logits / temperature, dim=-1)
    return sample_top_p(scaled_probs, top_p).reshape(-1)
158
+
159
+
160
def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
    """Nucleus sampling: draw one index per row from the smallest token set
    whose cumulative probability exceeds ``p``.

    ``probs`` must already be a probability distribution over the last dim.
    Returns indices of shape (..., 1) in the original ordering of ``probs``.
    """
    assert 0 <= p <= 1

    sorted_probs, sorted_idx = torch.sort(probs, dim=-1, descending=True)
    # Cumulative mass strictly *before* each token; a token whose preceding
    # mass already exceeds p lies outside the nucleus and is dropped.
    mass_before = torch.cumsum(sorted_probs, dim=-1) - sorted_probs
    kept = torch.where(mass_before > p, torch.zeros_like(sorted_probs), sorted_probs)
    # Renormalize the surviving mass and draw one sample per row.
    kept = kept / kept.sum(dim=-1, keepdim=True)
    choice = torch.multinomial(kept, num_samples=1)
    return torch.gather(sorted_idx, -1, choice)
parrot/lib/python3.10/site-packages/mistral_inference/lora.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from dataclasses import dataclass
3
+ from pathlib import Path
4
+ from typing import Any, Dict, NamedTuple, Union
5
+
6
+ import safetensors.torch
7
+ import torch
8
+ import torch.nn as nn
9
+ from simple_parsing.helpers import Serializable
10
+
11
+
12
@dataclass
class LoraArgs(Serializable):
    """Configuration of LoRA adapters.

    ``rank`` is the bottleneck dimension of the low-rank decomposition;
    ``scaling`` multiplies the LoRA skip connection. Both must be positive.
    """

    rank: int
    scaling: float

    def __post_init__(self) -> None:
        assert self.rank > 0
        assert self.scaling > 0.0
20
+
21
+
22
class LoRALinear(nn.Module):
    """
    Implementation of:
        - LoRA: https://arxiv.org/abs/2106.09685

    Notes:
        - Freezing is handled at network level, not layer level.
        - Scaling factor controls relative importance of LoRA skip
          connection versus original frozen weight. General guidance is
          to keep it to 2.0 and sweep over learning rate when changing
          the rank.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        rank: int,
        scaling: float,
        bias: bool = False,
    ):
        super().__init__()

        self.in_features = in_features
        self.out_features = out_features
        # Bias is not supported by this implementation.
        assert not bias
        self.bias = bias
        self.rank = rank
        self.scaling = scaling

        # Low-rank decomposition: x -> A (down to rank) -> B (up to out_features).
        self.lora_A = nn.Linear(
            self.in_features,
            self.rank,
            bias=self.bias,
        )
        self.lora_B = nn.Linear(
            self.rank,
            self.out_features,
            bias=self.bias,
        )

        # The original (frozen) dense projection.
        self.linear = nn.Linear(self.in_features, self.out_features, bias=self.bias)

        # make sure no LoRA weights are marked as "missing" in load_state_dict
        def ignore_missing_keys(m: nn.Module, incompatible_keys: NamedTuple) -> None:
            incompatible_keys.missing_keys[:] = []  # type: ignore

        self.register_load_state_dict_post_hook(ignore_missing_keys)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # y = W x + scaling * B(A(x))
        lora = self.lora_B(self.lora_A(x))
        result: torch.Tensor = self.linear(x) + lora * self.scaling
        return result

    def _load_from_state_dict(self, state_dict: Dict[str, Any], prefix: str, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # Intercepts loading of a plain-Linear checkpoint ("<prefix>weight")
        # into this LoRA layer: the frozen weight is loaded and the LoRA
        # matrices are zero-initialized so the layer initially behaves like
        # the original Linear.
        key_name = prefix + "weight"

        # full checkpoint
        if key_name in state_dict:
            w_ref = state_dict[key_name]

            # load frozen weights
            state_dict = {
                "linear.weight": w_ref,
                "lora_A.weight": torch.zeros_like(self.lora_A.weight, device=w_ref.device, dtype=w_ref.dtype),
                "lora_B.weight": torch.zeros_like(self.lora_B.weight, device=w_ref.device, dtype=w_ref.dtype),
            }
            self.load_state_dict(state_dict, assign=True, strict=True)
90
+
91
+
92
class LoRALoaderMixin:
    """Adds LoRA-checkpoint loading to a model that exposes ``state_dict``,
    ``load_state_dict``, ``named_modules``, ``layers``, ``args``, ``device``,
    ``dtype`` and ``pipeline_rank`` (e.g. ``Transformer``)."""

    def load_lora(self, lora_path: Union[Path, str], scaling: float = 2.0) -> None:
        """Loads LoRA checkpoint

        Args:
            lora_path: path to a safetensors LoRA checkpoint file.
            scaling: weight of the LoRA delta when merging (see LoRALinear).
        """

        lora_path = Path(lora_path)
        assert lora_path.is_file(), f"{lora_path} does not exist or is not a file"

        state_dict = safetensors.torch.load_file(lora_path)

        self._load_lora_state_dict(state_dict, scaling=scaling)

    def _load_lora_state_dict(self, lora_state_dict: Dict[str, torch.Tensor], scaling: float = 2.0) -> None:
        """Loads LoRA state_dict

        Two modes depending on how the model was built:
          * without LoRA layers (``args.lora is None``): the LoRA deltas are
            merged into the frozen linear weights (W <- W + scaling * B @ A);
          * with LoRA layers: LoRA tensors for locally-owned layers are
            loaded as-is.
        """
        lora_dtypes = set([p.dtype for p in lora_state_dict.values()])
        assert (
            len(lora_dtypes) == 1
        ), f"LoRA weights have multiple different dtypes {lora_dtypes}. All weights need to have the same dtype"
        lora_dtype = lora_dtypes.pop()
        assert lora_dtype == self.dtype, f"LoRA weights dtype differs from model's dtype {lora_dtype} != {self.dtype}"  # type: ignore[attr-defined]
        assert all("lora" in key for key in lora_state_dict.keys())

        # move tensors to device
        lora_state_dict = {k: v.to(self.device) for k, v in lora_state_dict.items()}  # type: ignore[attr-defined]

        state_dict = self.state_dict()  # type: ignore[attr-defined]

        if self.args.lora is None:  # type: ignore[attr-defined]
            logging.info("Loading and merging LoRA weights...")

            # replace every nn.Linear with a LoRALinear with 'meta' device except the output layer
            named_modules = dict(self.named_modules())  # type: ignore[attr-defined]
            for name, module in named_modules.items():
                if isinstance(module, nn.Linear) and name != "output":
                    # Parameter names look like "layers.<id>.<...>"; skip layers
                    # owned by other pipeline ranks.
                    layer_id = name.split(".")[1]
                    if layer_id not in self.layers:  # type: ignore[attr-defined]
                        logging.debug(
                            "Skipping parameter %s at pipeline rank %d",
                            name,
                            self.pipeline_rank,  # type: ignore[attr-defined]
                        )
                    elif (name + ".lora_B.weight") in lora_state_dict:
                        weight = (
                            module.weight
                            + (lora_state_dict[name + ".lora_B.weight"] @ lora_state_dict[name + ".lora_A.weight"])
                            * scaling
                        )

                        state_dict[name + ".weight"] = weight
        else:
            logging.info("Loading LoRA weights...")
            for k, v in lora_state_dict.items():
                # BUGFIX: the original code additionally called
                # `state_dict.update(lora_state_dict)` here on every iteration,
                # unconditionally inserting *all* LoRA keys — including those of
                # layers owned by other pipeline ranks — which defeated the
                # per-layer filter below and made the strict load_state_dict
                # fail on unexpected keys under pipeline parallelism.
                layer_id = k.split(".")[1]
                if layer_id in self.layers:  # type: ignore[attr-defined]
                    state_dict[k] = v
                else:
                    logging.debug(
                        "Skipping parameter %s at pipeline rank %d",
                        k,
                        self.pipeline_rank,  # type: ignore[attr-defined]
                    )

        self.load_state_dict(state_dict, strict=True)  # type: ignore[attr-defined]
parrot/lib/python3.10/site-packages/mistral_inference/main.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import warnings
5
+ from pathlib import Path
6
+ from typing import List, Optional, Tuple, Type, Union
7
+
8
+ import fire # type: ignore
9
+ import torch
10
+ import torch.distributed as dist
11
+ from mistral_common.protocol.instruct.messages import (
12
+ AssistantMessage,
13
+ ContentChunk,
14
+ ImageChunk,
15
+ ImageURLChunk,
16
+ TextChunk,
17
+ UserMessage,
18
+ )
19
+ from mistral_common.protocol.instruct.request import ChatCompletionRequest
20
+ from mistral_common.tokens.tokenizers.base import Tokenizer
21
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
22
+ from mistral_common.tokens.tokenizers.sentencepiece import is_sentencepiece
23
+ from mistral_common.tokens.tokenizers.tekken import (
24
+ SpecialTokenPolicy,
25
+ Tekkenizer,
26
+ is_tekken,
27
+ )
28
+ from PIL import Image
29
+
30
+ from mistral_inference.args import TransformerArgs
31
+ from mistral_inference.generate import generate, generate_mamba
32
+ from mistral_inference.mamba import Mamba
33
+ from mistral_inference.transformer import Transformer
34
+
35
+
36
def is_torchrun() -> bool:
    """Return True when this process was launched via ``torchrun``.

    torchrun injects the rendezvous environment variables; their joint
    presence is used as the detection signal.
    """
    torchrun_env = ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
    for var in torchrun_env:
        if var not in os.environ:
            return False
    return True
39
+
40
+
41
def load_tokenizer(model_path: Path) -> MistralTokenizer:
    """Find the unique tokenizer file inside *model_path* and load it.

    Accepts either a sentencepiece ``tokenizer.model.[v1,v2,v3]`` file or a
    ``tekken.json`` file; exactly one must be present.
    """
    candidates = [
        f for f in os.listdir(model_path) if is_tekken(model_path / f) or is_sentencepiece(model_path / f)
    ]
    assert (
        len(candidates) > 0
    ), f"No tokenizer in {model_path}, place a `tokenizer.model.[v1,v2,v3]` or `tekken.json` file in {model_path}."
    assert (
        len(candidates) == 1
    ), f"Multiple tokenizers {', '.join(candidates)} found in `model_path`, make sure to only have one tokenizer"

    mistral_tokenizer = MistralTokenizer.from_file(str(model_path / candidates[0]))

    # Tekken-based tokenizers should keep special tokens when decoding.
    if isinstance(mistral_tokenizer.instruct_tokenizer.tokenizer, Tekkenizer):
        mistral_tokenizer.instruct_tokenizer.tokenizer.special_token_policy = SpecialTokenPolicy.KEEP

    logging.info(f"Loaded tokenizer of type {mistral_tokenizer.instruct_tokenizer.__class__}")

    return mistral_tokenizer
58
+
59
+
60
def get_model_cls(model_path: str) -> Union[Type[Mamba], Type[Transformer]]:
    """Read ``params.json`` in *model_path* and return the matching model class.

    Falls back to ``Transformer`` when no ``model_type`` key is present.
    """
    params_file = Path(model_path) / "params.json"
    with open(params_file, "r") as f:
        params = json.load(f)

    registry = {"mamba": Mamba, "transformer": Transformer}
    return registry[params.get("model_type", "transformer")]  # type: ignore[return-value]
65
+
66
+
67
def pad_and_convert_to_tensor(list_of_lists: List[List[int]], pad_id: int) -> List[List[int]]:
    """Left-pad every token list with *pad_id* up to the longest list's length.

    NOTE(review): despite the name, this returns plain Python lists, not
    tensors — the rectangular result is what downstream batching needs.
    """
    target_len = max(len(tokens) for tokens in list_of_lists)
    return [[pad_id] * (target_len - len(tokens)) + tokens for tokens in list_of_lists]
75
+
76
+
77
def _get_multimodal_input() -> Tuple[UserMessage, bool]:
    """Interactively collect a text prompt plus any number of images.

    Returns the assembled user message and a flag that is True when the
    user provided no content at all (signal for the caller to stop).
    """
    chunks: List[ContentChunk] = []

    text = input("Text prompt: ")
    if text:
        chunks.append(TextChunk(text=text))

    print("[You can input zero, one or more images now.]")
    while True:
        image_ref = input("Image path or url [Leave empty and press enter to finish image input]: ")
        if not image_ref:
            break
        if Path(image_ref).is_file():
            # Local file: load eagerly with PIL.
            chunks.append(ImageChunk(image=Image.open(image_ref)))
        else:
            assert image_ref.startswith("http"), f"{image_ref} does not seem to be a valid url."
            chunks.append(ImageURLChunk(image_url=image_ref))

    return UserMessage(content=chunks), not chunks
100
+
101
+
102
def interactive(
    model_path: str,
    max_tokens: int = 35,
    temperature: float = 0.7,
    num_pipeline_ranks: int = 1,
    instruct: bool = False,
    lora_path: Optional[str] = None,
) -> None:
    """Run an interactive chat / completion loop against a local model.

    Args:
        model_path: Folder holding the weights, ``params.json`` and tokenizer.
        max_tokens: Maximum number of tokens generated per turn.
        temperature: Sampling temperature (0 means greedy).
        num_pipeline_ranks: Pipeline-parallel world size; overridden by the
            torchrun WORLD_SIZE when launched with torchrun.
        instruct: Use the chat template (required for multimodal models).
        lora_path: Optional path to LoRA weights merged/loaded onto the model.
    """
    if is_torchrun():
        torch.distributed.init_process_group()
        torch.cuda.set_device(torch.distributed.get_rank())
        should_print = torch.distributed.get_rank() == 0

        num_pipeline_ranks = torch.distributed.get_world_size()
    else:
        should_print = True
        num_pipeline_ranks = 1

    mistral_tokenizer: MistralTokenizer = load_tokenizer(Path(model_path))
    tokenizer: Tokenizer = mistral_tokenizer.instruct_tokenizer.tokenizer

    model_cls = get_model_cls(model_path)
    model = model_cls.from_folder(Path(model_path), max_batch_size=3, num_pipeline_ranks=num_pipeline_ranks)
    is_multimodal = isinstance(model.args, TransformerArgs) and model.args.vision_encoder is not None

    if is_multimodal:
        assert instruct, "Multimodal models should only be used in instruct mode"

    # load LoRA
    if lora_path is not None:
        model.load_lora(Path(lora_path))

    prompt: str = ""
    messages: List[UserMessage | AssistantMessage] = []

    while True:
        if should_print:
            if not is_multimodal:
                user_input = input("Prompt: ")

            if instruct:
                if is_multimodal:
                    mm_input, finished = _get_multimodal_input()
                    if finished:
                        break
                    messages += [mm_input]
                else:
                    messages += [UserMessage(content=user_input)]
                chat_completion_request = ChatCompletionRequest(messages=messages)

                tokenized = mistral_tokenizer.encode_chat_completion(chat_completion_request)
                tokens = tokenized.tokens
                images = tokenized.images
            else:
                prompt += user_input

                tokens = tokenizer.encode(prompt, bos=True, eos=False)
                images = []

            length_tensor = torch.tensor([len(tokens)], dtype=torch.int)
        else:
            length_tensor = torch.tensor([0], dtype=torch.int)
            # BUGFIX: non-zero pipeline ranks never assigned `images`, so the
            # positional `[images]` argument below raised a NameError.
            images = []

        if is_torchrun():
            dist.broadcast(length_tensor, src=0)

        if not should_print:
            # Non-printing ranks only need correctly-sized dummy tokens; the
            # real ids live on rank 0.
            tokens = int(length_tensor.item()) * [0]

        generate_fn = generate if isinstance(model, Transformer) else generate_mamba
        generated_tokens, _ = generate_fn(  # type: ignore[operator]
            [tokens],
            model,
            [images],
            max_tokens=max_tokens,
            temperature=temperature,
            eos_id=tokenizer.eos_id,
        )

        answer = tokenizer.decode(generated_tokens[0])

        if should_print:
            print(answer)
            print("=====================")

        # Grow the conversation context for the next turn.
        if instruct:
            messages += [AssistantMessage(content=answer)]
        else:
            prompt += answer
191
+
192
+
193
def demo(
    model_path: str,
    max_tokens: int = 35,
    temperature: float = 0,
    lora_path: Optional[str] = None,
) -> None:
    """Generate completions for a small set of fixed demo prompts and print them."""
    if is_torchrun():
        torch.distributed.init_process_group()
        torch.cuda.set_device(torch.distributed.get_rank())
        should_print = torch.distributed.get_rank() == 0
        num_pipeline_ranks = torch.distributed.get_world_size()
    else:
        should_print = True
        num_pipeline_ranks = 1

    model_cls = get_model_cls(model_path)
    model = model_cls.from_folder(Path(model_path), max_batch_size=3, num_pipeline_ranks=num_pipeline_ranks)
    # load LoRA
    if lora_path is not None:
        model.load_lora(Path(lora_path))

    mistral_tokenizer: MistralTokenizer = load_tokenizer(Path(model_path))
    tokenizer: Tokenizer = mistral_tokenizer.instruct_tokenizer.tokenizer

    prompts = [
        "This is a test",
        "This is another great test",
        "This is a third test, mistral AI is very good at testing. ",
    ]
    encoded_prompts = [tokenizer.encode(text, bos=True, eos=False) for text in prompts]

    if isinstance(model, Transformer):
        generate_fn = generate
    else:
        generate_fn = generate_mamba  # type: ignore[assignment]
        warnings.warn(
            "Batched generation is not correctly supported at the moment and therefore might lead to worse results "
            "as compared to non-batched generation. "
            "See https://github.com/state-spaces/mamba/issues/66#issuecomment-1862349718 for more information."
        )
        # Mamba needs a rectangular batch, so left-pad every prompt with BOS.
        encoded_prompts = pad_and_convert_to_tensor(encoded_prompts, mistral_tokenizer.instruct_tokenizer.BOS)  # type: ignore[attr-defined]

    generated_tokens, _logprobs = generate_fn(
        encoded_prompts,
        model,  # type: ignore[arg-type]
        max_tokens=max_tokens,
        temperature=temperature,
        eos_id=tokenizer.eos_id,
    )

    # Decode prompt + continuation together so the printed text is complete.
    completions = [tokenizer.decode(encoded_prompts[i] + toks) for i, toks in enumerate(generated_tokens)]

    if should_print:
        for text, logprob in zip(completions, _logprobs):
            print(text)
            logging.debug("Logprobs: %s", logprob)
            print("=====================")
256
+
257
+
258
def mistral_chat() -> None:
    """Console-script entry point exposing `interactive` through python-fire."""
    fire.Fire(interactive)
260
+
261
+
262
def mistral_demo() -> None:
    """Console-script entry point exposing `demo` through python-fire."""
    fire.Fire(demo)
264
+
265
+
266
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Dispatch to either the interactive chat loop or the fixed-prompt demo:
    #   python main.py interactive <model_path> / python main.py demo <model_path>
    fire.Fire(
        {
            "interactive": interactive,
            "demo": demo,
        }
    )
parrot/lib/python3.10/site-packages/mistral_inference/mamba.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+ from typing import List, Optional, Union
4
+
5
+ import safetensors
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+ from mistral_inference.args import MambaArgs
10
+ from mistral_inference.cache import BufferCache
11
+ from mistral_inference.model import ModelBase
12
+
13
# `mamba_ssm` is an optional dependency: record its availability instead of
# failing at import time, so transformer-only installs keep working.  The
# flag is checked in Mamba.__init__ before any mamba_ssm name is used.
_is_mamba_installed = False
try:
    from mamba_ssm.models.config_mamba import MambaConfig
    from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel

    _is_mamba_installed = True
except ImportError:
    _is_mamba_installed = False
21
+
22
+
23
class Mamba(ModelBase, nn.Module):
    """State-space language model wrapping `mamba_ssm`'s MambaLMHeadModel."""

    def __init__(self, args: MambaArgs):
        super().__init__()
        self.args = args
        assert _is_mamba_installed, "Mamba is not installed. Please install it using `pip install mamba-ssm`."

        # make sure naming is consistent with `mamba_ssm`
        config = MambaConfig(
            d_model=args.dim,
            n_layer=args.n_layers,
            vocab_size=args.vocab_size,
            ssm_cfg={"ngroups": args.n_groups, "layer": "Mamba2"},
            attn_layer_idx=[],
            attn_cfg={},
            rms_norm=args.rms_norm,
            residual_in_fp32=args.residual_in_fp32,
            fused_add_norm=args.fused_add_norm,
            pad_vocab_size_multiple=args.pad_vocab_size_multiple,
            tie_embeddings=args.tie_embeddings,
        )
        self.model = MambaLMHeadModel(config)

    @property
    def dtype(self) -> torch.dtype:
        # dtype of the first parameter; uniform after `.to(dtype=...)` in from_folder
        return next(self.parameters()).dtype

    @property
    def device(self) -> torch.device:
        # device of the first parameter; uniform after `.to(device=...)` in from_folder
        return next(self.parameters()).device

    def forward(
        self,
        input_ids: torch.Tensor,
        seqlens: List[int],  # not supported for now
        cache: Optional[BufferCache] = None,  # not supported for now
        ) -> torch.Tensor:
        """Return logits for *input_ids*; `seqlens` and `cache` are accepted
        for interface parity with Transformer but are currently ignored."""
        lm_output = self.model(input_ids)
        result: torch.Tensor = lm_output.logits
        return result

    @staticmethod
    def from_folder(
        folder: Union[Path, str],
        max_batch_size: int = 1,
        num_pipeline_ranks: int = 1,
        device: Union[torch.device, str] = "cuda",
        dtype: Optional[torch.dtype] = None,
    ) -> "Mamba":
        """Build a Mamba model from a folder with ``params.json`` and
        ``consolidated.safetensors``.

        `max_batch_size` and `num_pipeline_ranks` exist for ModelBase
        interface parity and are unused here (no pipeline parallelism).
        """
        with open(Path(folder) / "params.json", "r") as f:
            model_args = MambaArgs.from_dict(json.load(f))

        # Build on the meta device so no memory is allocated before loading;
        # `assign=True` below attaches the loaded tensors directly.
        with torch.device("meta"):
            model = Mamba(model_args)

        model_file = Path(folder) / "consolidated.safetensors"

        assert model_file.exists(), f"Make sure {model_file} exists."
        loaded = safetensors.torch.load_file(str(model_file))

        model.load_state_dict(loaded, assign=True, strict=True)
        return model.to(device=device, dtype=dtype)
parrot/lib/python3.10/site-packages/mistral_inference/model.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from pathlib import Path
3
+ from typing import List, Optional, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from mistral_inference.cache import BufferCache
9
+
10
+
11
class ModelBase(nn.Module, ABC):
    """Abstract interface shared by all inference models (Transformer, Mamba)."""

    def __init__(self) -> None:
        super().__init__()

    @property
    @abstractmethod
    def dtype(self) -> torch.dtype:
        # parameter dtype of the instantiated model
        pass

    @property
    @abstractmethod
    def device(self) -> torch.device:
        # device the model parameters live on
        pass

    @abstractmethod
    def forward(
        self,
        input_ids: torch.Tensor,
        seqlens: List[int],  # not supported for now
        cache: Optional[BufferCache] = None,  # not supported for now
    ) -> torch.Tensor:
        # return logits for the flattened batch of token ids
        pass

    @staticmethod
    @abstractmethod
    def from_folder(
        folder: Union[Path, str],
        max_batch_size: int = 1,
        num_pipeline_ranks: int = 1,
        device: Union[torch.device, str] = "cuda",
        dtype: Optional[torch.dtype] = None,
    ) -> "ModelBase":
        # construct and load a model from an on-disk checkpoint folder
        pass
parrot/lib/python3.10/site-packages/mistral_inference/moe.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ from typing import List
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from simple_parsing.helpers import Serializable
7
+ from torch import nn
8
+
9
+
10
@dataclasses.dataclass
class MoeArgs(Serializable):
    # Mixture-of-experts configuration: total number of experts and how many
    # experts each token is routed to.
    num_experts: int
    num_experts_per_tok: int
+
15
+
16
class MoeLayer(nn.Module):
    """Sparse mixture-of-experts layer: each token is routed to its top-k experts."""

    def __init__(self, experts: List[nn.Module], gate: nn.Module, moe_args: MoeArgs):
        super().__init__()
        assert len(experts) > 0
        self.experts = nn.ModuleList(experts)
        self.gate = gate
        self.args = moe_args

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Pick the top-k experts per token and softmax their gate scores
        # (softmax over the selected logits only, computed in float32).
        gate_logits = self.gate(inputs)
        weights, selected_experts = torch.topk(gate_logits, self.args.num_experts_per_tok)
        weights = F.softmax(weights, dim=1, dtype=torch.float).to(inputs.dtype)

        output = torch.zeros_like(inputs)
        # Iterate expert-by-expert so each expert runs a single batched call
        # over the tokens assigned to it.
        for expert_idx, expert in enumerate(self.experts):
            token_idx, expert_rank = torch.where(selected_experts == expert_idx)
            output[token_idx] += weights[token_idx, expert_rank, None] * expert(inputs[token_idx])
        return output
parrot/lib/python3.10/site-packages/mistral_inference/rope.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple
2
+
3
+ import torch
4
+
5
+
6
+ def precompute_freqs_cis(dim: int, end: int, theta: float) -> torch.Tensor:
7
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
8
+ t = torch.arange(end, device=freqs.device)
9
+ freqs = torch.outer(t, freqs).float()
10
+ return torch.polar(torch.ones_like(freqs), freqs) # complex64
11
+
12
+
13
+ def apply_rotary_emb(
14
+ xq: torch.Tensor,
15
+ xk: torch.Tensor,
16
+ freqs_cis: torch.Tensor,
17
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
18
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
19
+ xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
20
+ freqs_cis = freqs_cis[:, None, :]
21
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(-2)
22
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(-2)
23
+ return xq_out.type_as(xq), xk_out.type_as(xk)
24
+
25
+
26
+ def precompute_freqs_cis_2d(
27
+ dim: int,
28
+ height: int,
29
+ width: int,
30
+ theta: float,
31
+ ) -> torch.Tensor:
32
+ """
33
+ freqs_cis: 2D complex tensor of shape (height, width, dim // 2) to be indexed by
34
+ (height, width) position tuples
35
+ """
36
+ # (dim / 2) frequency bases
37
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
38
+
39
+ h = torch.arange(height, device=freqs.device)
40
+ w = torch.arange(width, device=freqs.device)
41
+
42
+ freqs_h = torch.outer(h, freqs[::2]).float()
43
+ freqs_w = torch.outer(w, freqs[1::2]).float()
44
+ freqs_2d = torch.cat(
45
+ [
46
+ freqs_h[:, None, :].repeat(1, width, 1),
47
+ freqs_w[None, :, :].repeat(height, 1, 1),
48
+ ],
49
+ dim=-1,
50
+ )
51
+ return torch.polar(torch.ones_like(freqs_2d), freqs_2d)
parrot/lib/python3.10/site-packages/mistral_inference/transformer.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import math
4
+ from dataclasses import dataclass
5
+ from pathlib import Path
6
+ from typing import Any, List, Mapping, Optional, Union
7
+
8
+ import safetensors.torch
9
+ import torch
10
+ from torch import nn
11
+
12
+ from mistral_inference.args import TransformerArgs
13
+ from mistral_inference.cache import BufferCache, CacheInputMetadata
14
+ from mistral_inference.lora import LoRALoaderMixin
15
+ from mistral_inference.model import ModelBase
16
+ from mistral_inference.rope import precompute_freqs_cis
17
+ from mistral_inference.transformer_layers import RMSNorm, TransformerBlock
18
+ from mistral_inference.vision_encoder import VisionLanguageAdapter, VisionTransformer
19
+
20
+
21
@dataclass
class SimpleInputMetadata:
    # rope absolute positions, one entry per token in the flattened batch
    positions: torch.Tensor

    @staticmethod
    def from_seqlens(seqlens: List[int], device: torch.device) -> "SimpleInputMetadata":
        """Build 0..len-1 position ids per sequence and concatenate them."""
        per_sequence = [torch.arange(0, seqlen) for seqlen in seqlens]
        positions = torch.cat(per_sequence).to(device=device, dtype=torch.long)
        return SimpleInputMetadata(positions=positions)
31
+
32
+
33
class Transformer(ModelBase, LoRALoaderMixin):
    """Decoder-only transformer with optional pipeline parallelism, LoRA and
    a vision encoder for multimodal checkpoints.

    With `num_pipeline_ranks > 1` each rank owns a contiguous slice of the
    layers; rank 0 additionally owns the embeddings (and vision modules),
    the last rank owns the final norm and output projection.
    """

    def __init__(
        self,
        args: TransformerArgs,
        pipeline_rank: int = 0,
        num_pipeline_ranks: int = 1,
    ):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        self._precomputed_freqs_cis: Optional[torch.Tensor] = None
        assert self.vocab_size > 0
        assert pipeline_rank < num_pipeline_ranks, (pipeline_rank, num_pipeline_ranks)
        self.pipeline_rank = pipeline_rank
        self.num_pipeline_ranks = num_pipeline_ranks
        # Modules specific to some ranks:
        self.tok_embeddings: Optional[nn.Embedding] = None
        self.norm: Optional[RMSNorm] = None
        self.output: Optional[nn.Linear] = None
        if pipeline_rank == 0:
            self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)

            # Vision modules live on rank 0 alongside the embeddings.
            self.vision_encoder: Optional[VisionTransformer] = None
            self.vision_language_adapter: Optional[VisionLanguageAdapter] = None
            if args.vision_encoder is not None:
                self.vision_encoder = VisionTransformer(args.vision_encoder)
                self.vision_language_adapter = VisionLanguageAdapter(args.vision_encoder.hidden_size, args.dim)
        if pipeline_rank == num_pipeline_ranks - 1:
            self.norm = RMSNorm(args.dim, eps=args.norm_eps)
            self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
        # Initialize all layers but slice off those not of this rank.
        layers = [
            TransformerBlock(
                dim=args.dim,
                hidden_dim=args.hidden_dim,
                n_heads=args.n_heads,
                n_kv_heads=args.n_kv_heads,
                head_dim=args.head_dim,
                norm_eps=args.norm_eps,
                lora=args.lora,
                moe=args.moe,
            )
            for _ in range(args.n_layers)
        ]
        num_layers_per_rank = math.ceil(self.n_layers / self.num_pipeline_ranks)
        offset = self.pipeline_rank * num_layers_per_rank
        end = min(self.n_layers, offset + num_layers_per_rank)
        # Keys are the global layer indices as strings, so state_dict names
        # line up with the checkpoint regardless of rank.
        self.layers = nn.ModuleDict({str(i): layers[i] for i in range(offset, end)})
        self.n_local_layers = len(self.layers)

    @property
    def dtype(self) -> torch.dtype:
        return next(self.parameters()).dtype

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    @property
    def freqs_cis(self) -> torch.Tensor:
        # We cache freqs_cis but need to take care that it is on the right device
        # and has the right dtype (complex64). The fact that the dtype is different
        # from the module's dtype means we cannot register it as a buffer
        if self._precomputed_freqs_cis is None:
            # default to 10**6
            theta = self.args.rope_theta or 1000000.0
            self._precomputed_freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000, theta)

        if self._precomputed_freqs_cis.device != self.device:
            self._precomputed_freqs_cis = self._precomputed_freqs_cis.to(device=self.device)
        return self._precomputed_freqs_cis

    def embed_vision_language_features(self, input_ids: torch.Tensor, images: List[torch.tensor]) -> torch.Tensor:  # type: ignore[valid-type]
        """Interleave text-token embeddings with projected image-patch
        features at the positions marked by the image token id."""
        assert self.tok_embeddings is not None
        assert self.vision_encoder is not None
        assert self.vision_language_adapter is not None
        assert self.args.vision_encoder is not None

        text_locations = input_ids != self.args.vision_encoder.image_token_id
        image_locations = input_ids == self.args.vision_encoder.image_token_id
        text_features = self.tok_embeddings(input_ids[text_locations])
        image_features = self.vision_language_adapter(self.vision_encoder(images))

        seq_len = input_ids.shape[0]
        N_txt, D_txt = text_features.shape
        N_img, D_img = image_features.shape

        assert D_txt == D_img, f"Text features dim {D_txt} should be equal to image features dim {D_img}"
        assert (
            seq_len == N_txt + N_img
        ), f"seq_len {seq_len} should be equal to N_txt + N_img {(N_txt, N_img, image_locations.sum().item())}"

        # Scatter text and image features back into one sequence-ordered tensor.
        combined_features = torch.empty(
            (seq_len, D_txt),
            dtype=text_features.dtype,
            device=text_features.device,
        )
        combined_features[text_locations, :] = text_features
        combined_features[image_locations, :] = image_features
        return combined_features

    def forward_partial(
        self,
        input_ids: torch.Tensor,
        seqlens: List[int],
        cache: Optional[BufferCache] = None,
        images: Optional[List[torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Local forward pass.

        If doing pipeline parallelism, this will return the activations of the last layer of this stage.
        For the last stage, this will return the normalized final embeddings.
        """
        assert (
            len(seqlens) <= self.args.max_batch_size
        ), f"Max batch size is {self.args.max_batch_size}, got batch size of {len(seqlens)}"
        (num_toks,) = input_ids.shape
        assert sum(seqlens) == num_toks, (sum(seqlens), num_toks)

        input_metadata: Union[CacheInputMetadata, SimpleInputMetadata]

        if cache is not None:
            input_metadata = cache.get_input_metadata(seqlens)
        else:
            input_metadata = SimpleInputMetadata.from_seqlens(seqlens, self.device)

        if self.pipeline_rank == 0:
            assert self.tok_embeddings is not None
            if self.vision_encoder is not None and images:
                h = self.embed_vision_language_features(input_ids, images)
            else:
                h = self.tok_embeddings(input_ids)
        else:
            # Intermediate ranks receive activations from the previous stage.
            h = torch.empty(num_toks, self.args.dim, device=self.device, dtype=self.dtype)
            torch.distributed.recv(h, src=self.pipeline_rank - 1)

        freqs_cis = self.freqs_cis[input_metadata.positions]

        for local_layer_id, layer in enumerate(self.layers.values()):
            if cache is not None:
                assert input_metadata is not None
                assert isinstance(input_metadata, CacheInputMetadata)
                cache_view = cache.get_view(local_layer_id, input_metadata)
            else:
                cache_view = None
            h = layer(h, freqs_cis, cache_view)

        if cache is not None:
            cache.update_seqlens(seqlens)
        if self.pipeline_rank < self.num_pipeline_ranks - 1:
            torch.distributed.send(h, dst=self.pipeline_rank + 1)
            return h
        else:
            # Last rank has a final normalization step.
            assert self.norm is not None
            return self.norm(h)  # type: ignore

    def forward(
        self,
        input_ids: torch.Tensor,
        seqlens: List[int],
        cache: Optional[BufferCache] = None,
        images: Optional[List[torch.Tensor]] = None,
    ) -> torch.Tensor:
        """Full forward pass: returns float32 logits on every rank (the last
        rank computes them and broadcasts to the others)."""
        h = self.forward_partial(input_ids, seqlens, cache=cache, images=images)
        if self.pipeline_rank < self.num_pipeline_ranks - 1:
            # ignore the intermediate activations as we'll get the final output from
            # the last stage
            outs = torch.empty(h.shape[0], self.vocab_size, device=h.device, dtype=h.dtype)
        else:
            assert self.output is not None
            outs = self.output(h)
        if self.num_pipeline_ranks > 1:
            torch.distributed.broadcast(outs, src=self.num_pipeline_ranks - 1)
        return outs.float()

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False) -> None:
        """Load only the parameters owned by this pipeline rank; every other
        key from the full checkpoint is logged and skipped."""
        state_to_load = {}
        skipped = set([])
        for k, v in state_dict.items():
            if k.startswith("tok_embeddings"):
                if self.pipeline_rank == 0:
                    state_to_load[k] = v
                else:
                    logging.debug(
                        "Skipping parameter %s at pipeline rank %d",
                        k,
                        self.pipeline_rank,
                    )
                    skipped.add(k)
            elif k.startswith("norm") or k.startswith("output"):
                if self.pipeline_rank == self.num_pipeline_ranks - 1:
                    state_to_load[k] = v
                else:
                    logging.debug(
                        "Skipping parameter %s at pipeline rank %d",
                        k,
                        self.pipeline_rank,
                    )
                    skipped.add(k)
            elif k.startswith("layers"):
                layer_id = k.split(".")[1]
                if layer_id in self.layers:
                    state_to_load[k] = v
                else:
                    logging.debug(
                        "Skipping parameter %s at pipeline rank %d",
                        k,
                        self.pipeline_rank,
                    )
                    skipped.add(k)
            elif k.startswith("vision_encoder") or k.startswith("vision_language_adapter"):
                # vision modules exist only on rank 0
                assert not self.pipeline_rank
                state_to_load[k] = v
            else:
                raise ValueError(f"Unexpected key {k}")
        # every checkpoint key must be either loaded or deliberately skipped
        assert set(state_dict.keys()) == skipped.union(set(state_to_load.keys()))
        super().load_state_dict(state_to_load, strict=strict, assign=assign)

    @staticmethod
    def from_folder(
        folder: Union[Path, str],
        max_batch_size: int = 1,
        num_pipeline_ranks: int = 1,
        device: Union[torch.device, str] = "cuda",
        dtype: Optional[torch.dtype] = None,
    ) -> "Transformer":
        """Build a Transformer from a folder containing ``params.json`` and
        either ``consolidated.00.pth`` or ``consolidated.safetensors``."""
        with open(Path(folder) / "params.json", "r") as f:
            model_args = TransformerArgs.from_dict(json.load(f))
        model_args.max_batch_size = max_batch_size
        if num_pipeline_ranks > 1:
            pipeline_rank = torch.distributed.get_rank()
        else:
            pipeline_rank = 0
        # build on the meta device so no memory is allocated before loading
        with torch.device("meta"):
            model = Transformer(
                model_args,
                pipeline_rank=pipeline_rank,
                num_pipeline_ranks=num_pipeline_ranks,
            )

        pt_model_file = Path(folder) / "consolidated.00.pth"
        safetensors_model_file = Path(folder) / "consolidated.safetensors"

        assert (
            pt_model_file.exists() or safetensors_model_file.exists()
        ), f"Make sure either {pt_model_file} or {safetensors_model_file} exists"
        assert not (
            pt_model_file.exists() and safetensors_model_file.exists()
        ), f"Both {pt_model_file} and {safetensors_model_file} cannot exist"

        if pt_model_file.exists():
            loaded = torch.load(str(pt_model_file), mmap=True)
        else:
            loaded = safetensors.torch.load_file(str(safetensors_model_file))

        model.load_state_dict(loaded, assign=True, strict=True)

        return model.to(device=device, dtype=dtype)
parrot/lib/python3.10/site-packages/mistral_inference/transformer_layers.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Optional, Tuple, Type, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+ from xformers.ops.fmha import memory_efficient_attention # type: ignore
7
+ from xformers.ops.fmha.attn_bias import BlockDiagonalMask
8
+
9
+ from mistral_inference.args import LoraArgs
10
+ from mistral_inference.cache import CacheView
11
+ from mistral_inference.lora import LoRALinear
12
+ from mistral_inference.moe import MoeArgs, MoeLayer
13
+ from mistral_inference.rope import apply_rotary_emb
14
+
15
+
16
def repeat_kv(keys: torch.Tensor, values: torch.Tensor, repeats: int, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Duplicate each KV head *repeats* times along *dim* (GQA -> full heads)."""
    expanded_keys = torch.repeat_interleave(keys, repeats=repeats, dim=dim)
    expanded_values = torch.repeat_interleave(values, repeats=repeats, dim=dim)
    return expanded_keys, expanded_values
20
+
21
+
22
def maybe_lora(
    lora_args: Optional[LoraArgs],
) -> Union[Type[nn.Linear], partial[LoRALinear]]:
    """Return the linear-layer factory to use: plain ``nn.Linear``, or a
    ``LoRALinear`` pre-configured with rank/scaling when LoRA is enabled."""
    if lora_args is not None:
        return partial(LoRALinear, rank=lora_args.rank, scaling=lora_args.scaling)
    return nn.Linear
29
+
30
+
31
class Attention(nn.Module):
    """Multi-head attention with rotary embeddings, grouped-query KV heads,
    optional LoRA projections and an optional KV cache, backed by xformers'
    memory-efficient attention kernel."""

    def __init__(
        self,
        dim: int,
        n_heads: int,
        head_dim: int,
        n_kv_heads: int,
        lora: Optional[LoraArgs] = None,
    ):
        super().__init__()

        self.n_heads: int = n_heads
        self.head_dim: int = head_dim
        self.n_kv_heads: int = n_kv_heads

        # how many query heads share each KV head (grouped-query attention)
        self.repeats = self.n_heads // self.n_kv_heads

        self.scale = self.head_dim**-0.5

        MaybeLora = maybe_lora(lora)
        self.wq = MaybeLora(dim, n_heads * head_dim, bias=False)
        self.wk = MaybeLora(dim, n_kv_heads * head_dim, bias=False)
        self.wv = MaybeLora(dim, n_kv_heads * head_dim, bias=False)
        self.wo = MaybeLora(n_heads * head_dim, dim, bias=False)

    def forward(
        self,
        x: torch.Tensor,
        freqs_cis: torch.Tensor,
        cache: Optional[CacheView] = None,
        mask: Optional[BlockDiagonalMask] = None,
    ) -> torch.Tensor:
        """Attend over `x`, a flattened batch of shape (sum(seqlens), dim)."""
        # an explicit mask and a cache (which carries its own mask) are exclusive
        assert mask is None or cache is None
        seqlen_sum, _ = x.shape

        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
        xq = xq.view(seqlen_sum, self.n_heads, self.head_dim)
        xk = xk.view(seqlen_sum, self.n_kv_heads, self.head_dim)
        xv = xv.view(seqlen_sum, self.n_kv_heads, self.head_dim)
        xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)

        if cache is None:
            # no cache: attend over this chunk only
            key, val = xk, xv
        elif cache.prefill:
            # prefill: combine cached and new KV, then store the new entries
            key, val = cache.interleave_kv(xk, xv)
            cache.update(xk, xv)
        else:
            # decode: append to the cache and attend over the full window
            cache.update(xk, xv)
            key, val = cache.key, cache.value
            key = key.view(seqlen_sum * cache.max_seq_len, self.n_kv_heads, self.head_dim)
            val = val.view(seqlen_sum * cache.max_seq_len, self.n_kv_heads, self.head_dim)

        # Repeat keys and values to match number of query heads
        key, val = repeat_kv(key, val, self.repeats, dim=1)

        # xformers requires (B=1, S, H, D)
        xq, key, val = xq[None, ...], key[None, ...], val[None, ...]
        output = memory_efficient_attention(xq, key, val, mask if cache is None else cache.mask)
        output = output.view(seqlen_sum, self.n_heads * self.head_dim)

        assert isinstance(output, torch.Tensor)

        return self.wo(output)  # type: ignore
94
+
95
+
96
class FeedForward(nn.Module):
    """SwiGLU feed-forward block: w2(silu(w1(x)) * w3(x))."""

    def __init__(self, dim: int, hidden_dim: int, lora: Optional[LoraArgs] = None):
        super().__init__()

        linear_cls = maybe_lora(lora)
        self.w1 = linear_cls(dim, hidden_dim, bias=False)
        self.w2 = linear_cls(hidden_dim, dim, bias=False)
        self.w3 = linear_cls(dim, hidden_dim, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gated = nn.functional.silu(self.w1(x)) * self.w3(x)
        return self.w2(gated)  # type: ignore
107
+
108
+
109
class RMSNorm(torch.nn.Module):
    """Root-mean-square layer normalization with a learned per-channel gain."""

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x: torch.Tensor) -> torch.Tensor:
        # divide by the RMS over the last dimension; eps keeps it finite
        mean_square = x.pow(2).mean(-1, keepdim=True)
        return x * torch.rsqrt(mean_square + self.eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # normalize in float32 for numerical stability, cast back, then scale
        normalized = self._norm(x.float()).type_as(x)
        return normalized * self.weight
121
+
122
+
123
class TransformerBlock(nn.Module):
    """Pre-norm transformer block: attention, then feed-forward (dense or MoE),
    each wrapped in a residual connection."""

    def __init__(
        self,
        dim: int,
        hidden_dim: int,
        n_heads: int,
        n_kv_heads: int,
        head_dim: int,
        norm_eps: float,
        lora: Optional[LoraArgs] = None,
        moe: Optional[MoeArgs] = None,
    ):
        super().__init__()
        self.n_heads = n_heads
        self.dim = dim

        self.attention = Attention(
            dim=dim,
            n_heads=n_heads,
            head_dim=head_dim,
            n_kv_heads=n_kv_heads,
            lora=lora,
        )
        self.attention_norm = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm = RMSNorm(dim, eps=norm_eps)

        self.feed_forward: nn.Module
        if moe is None:
            self.feed_forward = FeedForward(dim=dim, hidden_dim=hidden_dim, lora=lora)
        else:
            # mixture-of-experts: one FeedForward per expert plus a router
            self.feed_forward = MoeLayer(
                experts=[FeedForward(dim=dim, hidden_dim=hidden_dim, lora=lora) for _ in range(moe.num_experts)],
                gate=nn.Linear(dim, moe.num_experts, bias=False),
                moe_args=moe,
            )

    def forward(
        self,
        x: torch.Tensor,
        freqs_cis: torch.Tensor,
        cache: Optional[CacheView] = None,
        mask: Optional[BlockDiagonalMask] = None,
    ) -> torch.Tensor:
        # residual around attention, then residual around the feed-forward
        attn_out = self.attention.forward(self.attention_norm(x), freqs_cis, cache)
        hidden = x + attn_out
        ffn_out = self.feed_forward.forward(self.ffn_norm(hidden))
        return hidden + ffn_out
parrot/lib/python3.10/site-packages/mistral_inference/vision_encoder.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from xformers.ops.fmha.attn_bias import BlockDiagonalMask
6
+
7
+ from mistral_inference.args import VisionEncoderArgs
8
+ from mistral_inference.rope import precompute_freqs_cis_2d
9
+ from mistral_inference.transformer_layers import RMSNorm, TransformerBlock
10
+
11
+
12
def position_meshgrid(
    patch_embeds_list: list[torch.Tensor],
) -> torch.Tensor:
    """Return the (row, col) grid position of every patch token.

    For each patch-embedding tensor (whose trailing two dims are the patch
    grid H' x W'), emit one (row, col) pair per patch in row-major order,
    then concatenate across images into a single (N_toks, 2) tensor.
    """
    per_image = []
    for patch in patch_embeds_list:
        rows = torch.arange(patch.shape[-2])
        cols = torch.arange(patch.shape[-1])
        grid = torch.meshgrid(rows, cols, indexing="ij")
        per_image.append(torch.stack(grid, dim=-1).reshape(-1, 2))
    return torch.cat(per_image)
29
+
30
+
31
class VisionTransformer(nn.Module):
    """Patch-based vision encoder over variable-sized images.

    Images are patchified with a strided convolution, concatenated into one
    flat token sequence, and run through a stack of transformer blocks with
    2D rotary position embeddings. A block-diagonal attention mask keeps
    tokens of different images from attending to each other.
    """

    def __init__(self, args: VisionEncoderArgs):
        super().__init__()
        self.args = args
        # Non-overlapping patchification: kernel size == stride == patch_size.
        self.patch_conv = nn.Conv2d(
            in_channels=args.num_channels,
            out_channels=args.hidden_size,
            kernel_size=args.patch_size,
            stride=args.patch_size,
            bias=False,
        )
        self.ln_pre = RMSNorm(args.hidden_size, eps=1e-5)
        self.transformer = VisionTransformerBlocks(args)

        head_dim = self.args.hidden_size // self.args.num_attention_heads
        assert head_dim % 2 == 0, "ROPE requires even head_dim"
        # The rotary table is built lazily on first access of `freqs_cis`.
        self._freqs_cis: Optional[torch.Tensor] = None

    @property
    def max_patches_per_side(self) -> int:
        # Number of patches fitting along one side of a full-size image.
        return self.args.image_size // self.args.patch_size

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    @property
    def freqs_cis(self) -> torch.Tensor:
        # Lazily precompute the full 2D rotary table and keep it colocated
        # with the module's parameters.
        if self._freqs_cis is None:
            self._freqs_cis = precompute_freqs_cis_2d(
                dim=self.args.hidden_size // self.args.num_attention_heads,
                height=self.max_patches_per_side,
                width=self.max_patches_per_side,
                theta=self.args.rope_theta,
            )
        if self._freqs_cis.device != self.device:
            self._freqs_cis = self._freqs_cis.to(device=self.device)
        return self._freqs_cis

    def forward(
        self,
        images: List[torch.Tensor],
    ) -> torch.Tensor:
        """
        Args:
            images: list of N_img images of variable sizes, each of shape (C, H, W)

        Returns:
            image_features: tensor of token features for all tokens of all images of
                shape (N_toks, D)
        """
        # Patchify each image independently; each result is (D, H', W').
        embeds_per_image = [self.patch_conv(img.unsqueeze(0)).squeeze(0) for img in images]

        # Flatten every image's patch grid and concatenate into one sequence.
        tokens = torch.cat([e.flatten(1).permute(1, 0) for e in embeds_per_image], dim=0)
        tokens = self.ln_pre(tokens)

        # Look up the rotary embedding for each token's (row, col) position.
        positions = position_meshgrid(embeds_per_image).to(self.device)
        freqs_cis = self.freqs_cis[positions[:, 0], positions[:, 1]]

        # Block-diagonal mask: tokens attend only within their own image.
        mask = BlockDiagonalMask.from_seqlens(
            [e.shape[-2] * e.shape[-1] for e in embeds_per_image],
        )
        out = self.transformer(tokens, mask=mask, freqs_cis=freqs_cis)
        return out  # type: ignore[no-any-return]
103
+
104
+
105
class VisionLanguageAdapter(nn.Module):
    """Two-layer MLP (Linear -> GELU -> Linear) projecting vision features
    of width ``in_dim`` into the language model's embedding space ``out_dim``."""

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.w_in = nn.Linear(in_dim, out_dim, bias=True)
        self.gelu = nn.GELU()
        self.w_out = nn.Linear(out_dim, out_dim, bias=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        projected = self.w_in(x)
        activated = self.gelu(projected)
        return self.w_out(activated)  # type: ignore[no-any-return]
118
+
119
+
120
class VisionTransformerBlocks(nn.Module):
    """Stack of ``num_hidden_layers`` transformer blocks for the vision
    encoder (full multi-head attention: n_kv_heads == n_heads)."""

    def __init__(self, args: VisionEncoderArgs):
        super().__init__()
        head_dim = args.hidden_size // args.num_attention_heads
        self.layers = torch.nn.ModuleList(
            TransformerBlock(
                dim=args.hidden_size,
                hidden_dim=args.intermediate_size,
                n_heads=args.num_attention_heads,
                n_kv_heads=args.num_attention_heads,
                head_dim=head_dim,
                norm_eps=1e-5,
            )
            for _ in range(args.num_hidden_layers)
        )

    def forward(
        self,
        x: torch.Tensor,
        mask: BlockDiagonalMask,
        freqs_cis: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # Thread the token sequence through every block in order.
        for block in self.layers:
            x = block(x, mask=mask, freqs_cis=freqs_cis)
        return x
145
+
146
+
parrot/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 ADDED
Binary file (22.1 kB). View file
 
parrot/lib/python3.10/site-packages/pillow.libs/libbrotlidec-ba690955.so.1 ADDED
Binary file (58.2 kB). View file
 
parrot/lib/python3.10/site-packages/pillow.libs/libsharpyuv-898c0cb5.so.0.1.0 ADDED
Binary file (42 kB). View file
 
parrot/lib/python3.10/site-packages/pillow.libs/libwebpdemux-f2642bcc.so.2.0.15 ADDED
Binary file (26.1 kB). View file
 
parrot/lib/python3.10/site-packages/pillow.libs/libwebpmux-d524b4d5.so.3.1.0 ADDED
Binary file (54.5 kB). View file
 
parrot/lib/python3.10/site-packages/scripts/__pycache__/test_imports.cpython-310.pyc ADDED
Binary file (234 Bytes). View file
 
parrot/lib/python3.10/site-packages/scripts/__pycache__/test_leak.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
parrot/lib/python3.10/site-packages/scripts/run_emscripten_tests.py ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Licensed to the Apache Software Foundation (ASF) under one
2
+ # or more contributor license agreements. See the NOTICE file
3
+ # distributed with this work for additional information
4
+ # regarding copyright ownership. The ASF licenses this file
5
+ # to you under the Apache License, Version 2.0 (the
6
+ # "License"); you may not use this file except in compliance
7
+ # with the License. You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing,
12
+ # software distributed under the License is distributed on an
13
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ # KIND, either express or implied. See the License for the
15
+ # specific language governing permissions and limitations
16
+ # under the License.
17
+
18
+
19
+ import argparse
20
+ import contextlib
21
+ import http.server
22
+ import os
23
+ import queue
24
+ import shutil
25
+ import subprocess
26
+ import sys
27
+ import time
28
+ import threading
29
+
30
+ from pathlib import Path
31
+ from io import BytesIO
32
+
33
+ from selenium import webdriver
34
+
35
+
36
class TemplateOverrider(http.server.SimpleHTTPRequestHandler):
    """HTTP handler that serves the pyarrow wheel plus a generated test page
    and web-worker script; any other path falls back to static file serving.

    Bug fix: the wheel branch used to be a separate ``if``, so a wheel
    request served the wheel and then *also* fell into the trailing
    ``else: super().do_GET()``, writing a second response on the same
    connection. The branches are now a single if/elif/else chain.
    """

    def log_request(self, code="-", size="-"):
        # Silence per-request logging so pytest output stays readable.
        return

    def _send_body(self, body: bytes, content_type: str) -> None:
        # Send a complete 200 response with an in-memory body.
        self.send_response(200)
        self.send_header("Content-type", content_type)
        self.send_header("Content-length", len(body))
        self.end_headers()
        self.copyfile(BytesIO(body), self.wfile)

    def do_GET(self) -> None:
        if self.path.endswith(PYARROW_WHEEL_PATH.name):
            self.send_response(200)
            self.send_header("Content-type", "application/x-zip")
            self.end_headers()
            with PYARROW_WHEEL_PATH.open(mode="rb") as wheel:
                self.copyfile(wheel, self.wfile)
        elif self.path.endswith("/test.html"):
            # Page that spawns the pyodide web worker and captures its
            # prints / results into window-level state polled by selenium.
            body = b"""
<!doctype html>
<html>
<head>
<script>
window.python_done_callback = undefined;
window.python_logs = [];
function capturelogs(evt) {
    if ('results' in evt.data) {
        if (window.python_done_callback) {
            let callback = window.python_done_callback;
            window.python_done_callback = undefined;
            callback({result:evt.data.results});
        }
    }
    if ('print' in evt.data) {
        evt.data.print.forEach((x)=>{window.python_logs.push(x)});
    }
}
window.pyworker = new Worker("worker.js");
window.pyworker.onmessage = capturelogs;
</script>
</head>
<body></body>
</html>
"""
            self._send_body(body, "text/html")
        elif self.path.endswith("/worker.js"):
            # Worker that loads pyodide, forwards stdout/stderr back to the
            # page, and runs the Python snippets it is posted.
            body = b"""
importScripts("./pyodide.js");
onmessage = async function (e) {
    const data = e.data;
    if (!self.pyodide) {
        self.pyodide = await loadPyodide();
    }
    function do_print(arg) {
        let databytes = Array.from(arg);
        self.postMessage({print:databytes});
        return databytes.length;
    }
    self.pyodide.setStdout({write:do_print,isatty:data.isatty});
    self.pyodide.setStderr({write:do_print,isatty:data.isatty});

    await self.pyodide.loadPackagesFromImports(data.python);
    let results = await self.pyodide.runPythonAsync(data.python);
    self.postMessage({results});
}
"""
            self._send_body(body, "application/javascript")
        else:
            return super().do_GET()

    def end_headers(self):
        # Enable Cross-Origin Resource Sharing (CORS)
        self.send_header("Access-Control-Allow-Origin", "*")
        super().end_headers()
114
+
115
+
116
def run_server_thread(dist_dir, q):
    """Serve `dist_dir` over HTTP on an OS-assigned port, forever.

    The chosen (host, port) address is reported back through queue `q` so
    the launching thread knows where to connect. Never returns.

    Fix: removed the stray ``global _SERVER_ADDRESS`` declaration — that
    name was never assigned here nor defined anywhere in the module.
    """
    os.chdir(dist_dir)
    server = http.server.HTTPServer(("", 0), TemplateOverrider)
    q.put(server.server_address)
    print(f"Starting server for {dist_dir} at: {server.server_address}")
    server.serve_forever()
123
+
124
+
125
@contextlib.contextmanager
def launch_server(dist_dir):
    """Context manager: start the test HTTP server for `dist_dir` in a
    daemon thread and yield its (host, port) address.

    Bug fix: the previous version called ``p.terminate()`` on exit, but
    ``threading.Thread`` has no ``terminate()`` method, so leaving the
    context raised AttributeError. The thread is a daemon running
    ``serve_forever``; it is simply left to die with the process.
    """
    q = queue.Queue()
    worker = threading.Thread(target=run_server_thread, args=[dist_dir, q], daemon=True)
    worker.start()
    address = q.get(timeout=50)
    time.sleep(0.1)  # wait to make sure server is started
    yield address
134
+
135
+
136
class NodeDriver:
    """Drives pyodide tests in a Node.js REPL spawned under ``script``
    (which gives node a pseudo-tty, so its I/O is unbuffered).

    Fix: removed the redundant ``import subprocess`` from the class body —
    the module already imports subprocess at top level, and a class-body
    import only created a useless ``NodeDriver.subprocess`` attribute.
    """

    def __init__(self, hostname, port):
        self.process = subprocess.Popen(
            [shutil.which("script"), "-c", shutil.which("node")],
            stdin=subprocess.PIPE,
            shell=False,
            bufsize=0,
        )
        print(self.process)
        time.sleep(0.1)  # wait for node to start
        self.hostname = hostname
        self.port = port
        self.last_ret_code = None

    def load_pyodide(self, dist_dir):
        # Boot pyodide inside the node REPL.
        self.execute_js(
            f"""
const {{ loadPyodide }} = require('{dist_dir}/pyodide.js');
let pyodide = await loadPyodide();
"""
        )

    def clear_logs(self):
        pass  # we don't handle logs for node

    def write_stdin(self, buffer):
        # because we use unbuffered IO for stdout, stdin.write is also
        # unbuffered so might under-run on writes: retry until all bytes
        # are delivered or the node process dies.
        while len(buffer) > 0 and self.process.poll() is None:
            written = self.process.stdin.write(buffer)
            if written == len(buffer):
                break
            elif written == 0:
                # full buffer - wait
                time.sleep(0.01)
            else:
                buffer = buffer[written:]

    def execute_js(self, code, wait_for_terminate=True):
        # NOTE(review): `wait_for_terminate` is accepted for interface
        # symmetry with BrowserDriver but is not used here; the REPL just
        # receives the code on stdin. Returns None.
        self.write_stdin((code + "\n").encode("utf-8"))

    def load_arrow(self):
        self.execute_js(f"await pyodide.loadPackage('{PYARROW_WHEEL_PATH}');")

    def execute_python(self, code, wait_for_terminate=True):
        js_code = f"""
python = `{code}`;
await pyodide.loadPackagesFromImports(python);
python_output = await pyodide.runPythonAsync(python);
"""
        self.last_ret_code = self.execute_js(js_code, wait_for_terminate)
        return self.last_ret_code

    def wait_for_done(self):
        # in node we just let it run above, then send EOF and join process;
        # node's exit status carries the pytest return code.
        self.write_stdin(b"process.exit(python_output)\n")
        return self.process.wait()
197
+
198
+
199
class BrowserDriver:
    """Drives pyodide tests in a selenium-controlled browser: Python code is
    posted to the page's web worker, and its prints/results are polled back
    out of window-level state."""

    def __init__(self, hostname, port, driver):
        self.driver = driver
        self.driver.get(f"http://{hostname}:{port}/test.html")
        self.driver.set_script_timeout(100)

    def load_pyodide(self, dist_dir):
        # The test page's worker loads pyodide itself; nothing to do here.
        pass

    def load_arrow(self):
        self.execute_python(
            f"import pyodide_js as pjs\n"
            f"await pjs.loadPackage('{PYARROW_WHEEL_PATH.name}')\n"
        )

    def execute_python(self, code, wait_for_terminate=True):
        # Synchronous variant blocks via selenium's async-script callback;
        # the fire-and-forget variant stashes the result for wait_for_done.
        if not wait_for_terminate:
            self.driver.execute_script(
                f"""
            let python = `{code}`;
            window.python_done_callback= (x) => {{window.python_script_done=x;}};
            window.pyworker.postMessage(
                {{python,isatty:{'true' if sys.stdout.isatty() else 'false'}}});
            """
            )
            return
        self.driver.execute_async_script(
            f"""
            let callback = arguments[arguments.length-1];
            python = `{code}`;
            window.python_done_callback = callback;
            window.pyworker.postMessage(
                {{python, isatty: {'true' if sys.stdout.isatty() else 'false'}}});
            """
        )

    def clear_logs(self):
        self.driver.execute_script("window.python_logs = [];")

    def wait_for_done(self):
        # Poll the page: drain buffered pytest output, then check whether
        # the fire-and-forget script has published its result.
        while True:
            pending = self.driver.execute_script(
                "let temp = window.python_logs;window.python_logs=[];return temp;"
            )
            if pending:
                sys.stdout.buffer.write(bytes(pending))
            status = self.driver.execute_script("return window.python_script_done;")
            if status is not None:
                result = status["result"]
                self.driver.execute_script("delete window.python_script_done;")
                return result
            time.sleep(0.1)
253
+
254
+
255
class ChromeDriver(BrowserDriver):
    """BrowserDriver backed by headless Chrome."""

    def __init__(self, hostname, port):
        from selenium.webdriver.chrome.options import Options

        opts = Options()
        for flag in ("--headless", "--no-sandbox"):
            opts.add_argument(flag)
        super().__init__(hostname, port, webdriver.Chrome(options=opts))
263
+
264
+
265
class FirefoxDriver(BrowserDriver):
    """BrowserDriver backed by headless Firefox."""

    def __init__(self, hostname, port):
        from selenium.webdriver.firefox.options import Options

        opts = Options()
        opts.add_argument("--headless")
        super().__init__(hostname, port, webdriver.Firefox(options=opts))
273
+
274
+
275
+ def _load_pyarrow_in_runner(driver, wheel_name):
276
+ driver.load_arrow()
277
+ driver.execute_python(
278
+ """import sys
279
+ import micropip
280
+ if "pyarrow" not in sys.modules:
281
+ await micropip.install("hypothesis")
282
+ import pyodide_js as pjs
283
+ await pjs.loadPackage("numpy")
284
+ await pjs.loadPackage("pandas")
285
+ import pytest
286
+ import pandas # import pandas after pyarrow package load for pandas/pyarrow
287
+ # functions to work
288
+ import pyarrow
289
+ """,
290
+ wait_for_terminate=True,
291
+ )
292
+
293
+
294
# Command line: run the pyarrow test suite inside a pyodide runtime
# (headless Chrome/Firefox or node) served from a local pyodide dist.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-d",
    "--dist-dir",
    type=str,
    help="Pyodide distribution directory",
    default="./pyodide",
)
parser.add_argument("wheel", type=str, help="Wheel to run tests from")
parser.add_argument(
    "-t", "--test-submodule", help="Submodule that tests live in", default="test"
)
parser.add_argument(
    "-r",
    "--runtime",
    type=str,
    choices=["chrome", "node", "firefox"],
    help="Runtime to run tests in",
    default="chrome",
)
args = parser.parse_args()

# Referenced by TemplateOverrider and NodeDriver.load_arrow.
PYARROW_WHEEL_PATH = Path(args.wheel).resolve()

dist_dir = Path(os.getcwd(), args.dist_dir).resolve()
print(f"dist dir={dist_dir}")
with launch_server(dist_dir) as (hostname, port):
    # Dispatch on the chosen runtime (argparse `choices` guarantees a hit).
    _DRIVER_CLASSES = {
        "chrome": ChromeDriver,
        "node": NodeDriver,
        "firefox": FirefoxDriver,
    }
    driver = _DRIVER_CLASSES[args.runtime](hostname, port)

    print("Load pyodide in browser")
    driver.load_pyodide(dist_dir)
    print("Load pyarrow in browser")
    _load_pyarrow_in_runner(driver, Path(args.wheel).name)
    driver.clear_logs()
    print("Run pytest in browser")
    driver.execute_python(
        """
import pyarrow,pathlib
pyarrow_dir = pathlib.Path(pyarrow.__file__).parent
pytest.main([pyarrow_dir, '-v'])
""",
        wait_for_terminate=False,
    )
    print("Wait for done")
    # Exit immediately with pytest's return code, skipping cleanup.
    os._exit(driver.wait_for_done())
parrot/lib/python3.10/site-packages/scripts/test_leak.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Licensed to the Apache Software Foundation (ASF) under one
4
+ # or more contributor license agreements. See the NOTICE file
5
+ # distributed with this work for additional information
6
+ # regarding copyright ownership. The ASF licenses this file
7
+ # to you under the Apache License, Version 2.0 (the
8
+ # "License"); you may not use this file except in compliance
9
+ # with the License. You may obtain a copy of the License at
10
+ #
11
+ # http://www.apache.org/licenses/LICENSE-2.0
12
+ #
13
+ # Unless required by applicable law or agreed to in writing,
14
+ # software distributed under the License is distributed on an
15
+ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16
+ # KIND, either express or implied. See the License for the
17
+ # specific language governing permissions and limitations
18
+ # under the License.
19
+
20
+ import pyarrow as pa
21
+ import numpy as np
22
+ import pandas as pd
23
+ from pyarrow.tests.util import rands
24
+ import memory_profiler
25
+ import gc
26
+ import io
27
+
28
+ MEGABYTE = 1 << 20
29
+
30
+
31
def assert_does_not_leak(f, iterations=10, check_interval=1, tolerance=5):
    """Run `f` repeatedly and fail if resident memory grows by more than
    `tolerance` megabytes over the baseline.

    Usage is sampled (after a gc pass) every `check_interval` iterations
    and once more at the end; progress is printed in place.
    """
    gc.collect()
    baseline = memory_profiler.memory_usage()[0]
    for i in range(iterations):
        f()
        if i % check_interval != 0:
            continue
        gc.collect()
        diff = memory_profiler.memory_usage()[0] - baseline
        print("{0}: {1}\r".format(i, diff), end="")
        if diff > tolerance:
            raise Exception("Memory increased by {0} megabytes after {1} "
                            "iterations".format(diff, i + 1))
    gc.collect()
    diff = memory_profiler.memory_usage()[0] - baseline
    print("\nMemory increased by {0} megabytes after {1} "
          "iterations".format(diff, iterations))
49
+
50
+
51
def test_leak1():
    """to_pandas on a large single-column table must not leak."""
    column = pa.array(np.concatenate([np.random.randn(100000)] * 1000))
    table = pa.Table.from_arrays([column], ['foo'])

    assert_does_not_leak(lambda: table.to_pandas())
58
+
59
+
60
def test_leak2():
    """Round-tripping a table through pandas and the IPC file format
    must not leak."""
    column = pa.array(np.concatenate([np.random.randn(100000)] * 10))
    table = pa.Table.from_arrays([column], ['foo'])

    def roundtrip():
        frame = table.to_pandas()

        batch = pa.RecordBatch.from_pandas(frame)

        # Write the batch to an in-memory IPC file...
        sink = io.BytesIO()
        writer = pa.RecordBatchFileWriter(sink, batch.schema)
        writer.write_batch(batch)
        writer.close()

        # ...and read it all back.
        reader = pa.open_file(pa.BufferReader(sink.getvalue()))
        reader.read_all()

    assert_does_not_leak(roundtrip, iterations=50, tolerance=50)
79
+
80
+
81
def test_leak3():
    """Repeated ParquetWriter.write_table calls should stay within a
    modest memory budget."""
    import pyarrow.parquet as pq

    frame = pd.DataFrame({'a{0}'.format(i): [1, 2, 3, 4] for i in range(50)})
    table = pa.Table.from_pandas(frame, preserve_index=False)

    writer = pq.ParquetWriter('leak_test_' + rands(5) + '.parquet',
                              table.schema)

    # This does not "leak" per se but we do want to have this use as little
    # memory as possible
    assert_does_not_leak(
        lambda: writer.write_table(table, row_group_size=len(table)),
        iterations=500, check_interval=50, tolerance=20)
+ check_interval=50, tolerance=20)
98
+
99
+
100
def test_ARROW_8801():
    """Regression check for ARROW-8801: converting a tz-aware timestamp
    column back to pandas repeatedly must not blow up memory."""
    stamps = pd.to_datetime(
        np.random.randint(0, 2**32, size=2**20, dtype=np.int64),
        unit='ms', utc=True)
    table = pa.table(pd.DataFrame({'x': stamps}))

    assert_does_not_leak(lambda: table.to_pandas(split_blocks=False),
                         iterations=1000, check_interval=50, tolerance=1000)


if __name__ == '__main__':
    test_ARROW_8801()
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/PKG-INFO ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.2
2
+ Name: setuptools
3
+ Version: 75.8.0
4
+ Summary: Easily download, build, install, upgrade, and uninstall Python packages
5
+ Author-email: Python Packaging Authority <distutils-sig@python.org>
6
+ Project-URL: Source, https://github.com/pypa/setuptools
7
+ Project-URL: Documentation, https://setuptools.pypa.io/
8
+ Project-URL: Changelog, https://setuptools.pypa.io/en/stable/history.html
9
+ Keywords: CPAN PyPI distutils eggs package management
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Topic :: System :: Archiving :: Packaging
17
+ Classifier: Topic :: System :: Systems Administration
18
+ Classifier: Topic :: Utilities
19
+ Requires-Python: >=3.9
20
+ Description-Content-Type: text/x-rst
21
+ License-File: LICENSE
22
+ Provides-Extra: test
23
+ Requires-Dist: pytest!=8.1.*,>=6; extra == "test"
24
+ Requires-Dist: virtualenv>=13.0.0; extra == "test"
25
+ Requires-Dist: wheel>=0.44.0; extra == "test"
26
+ Requires-Dist: pip>=19.1; extra == "test"
27
+ Requires-Dist: packaging>=24.2; extra == "test"
28
+ Requires-Dist: jaraco.envs>=2.2; extra == "test"
29
+ Requires-Dist: pytest-xdist>=3; extra == "test"
30
+ Requires-Dist: jaraco.path>=3.7.2; extra == "test"
31
+ Requires-Dist: build[virtualenv]>=1.0.3; extra == "test"
32
+ Requires-Dist: filelock>=3.4.0; extra == "test"
33
+ Requires-Dist: ini2toml[lite]>=0.14; extra == "test"
34
+ Requires-Dist: tomli-w>=1.0.0; extra == "test"
35
+ Requires-Dist: pytest-timeout; extra == "test"
36
+ Requires-Dist: pytest-perf; sys_platform != "cygwin" and extra == "test"
37
+ Requires-Dist: jaraco.develop>=7.21; (python_version >= "3.9" and sys_platform != "cygwin") and extra == "test"
38
+ Requires-Dist: pytest-home>=0.5; extra == "test"
39
+ Requires-Dist: pytest-subprocess; extra == "test"
40
+ Requires-Dist: pyproject-hooks!=1.1; extra == "test"
41
+ Requires-Dist: jaraco.test>=5.5; extra == "test"
42
+ Provides-Extra: doc
43
+ Requires-Dist: sphinx>=3.5; extra == "doc"
44
+ Requires-Dist: jaraco.packaging>=9.3; extra == "doc"
45
+ Requires-Dist: rst.linker>=1.9; extra == "doc"
46
+ Requires-Dist: furo; extra == "doc"
47
+ Requires-Dist: sphinx-lint; extra == "doc"
48
+ Requires-Dist: jaraco.tidelift>=1.4; extra == "doc"
49
+ Requires-Dist: pygments-github-lexers==0.0.5; extra == "doc"
50
+ Requires-Dist: sphinx-favicon; extra == "doc"
51
+ Requires-Dist: sphinx-inline-tabs; extra == "doc"
52
+ Requires-Dist: sphinx-reredirects; extra == "doc"
53
+ Requires-Dist: sphinxcontrib-towncrier; extra == "doc"
54
+ Requires-Dist: sphinx-notfound-page<2,>=1; extra == "doc"
55
+ Requires-Dist: pyproject-hooks!=1.1; extra == "doc"
56
+ Requires-Dist: towncrier<24.7; extra == "doc"
57
+ Provides-Extra: ssl
58
+ Provides-Extra: certs
59
+ Provides-Extra: core
60
+ Requires-Dist: packaging>=24.2; extra == "core"
61
+ Requires-Dist: more_itertools>=8.8; extra == "core"
62
+ Requires-Dist: jaraco.text>=3.7; extra == "core"
63
+ Requires-Dist: importlib_metadata>=6; python_version < "3.10" and extra == "core"
64
+ Requires-Dist: tomli>=2.0.1; python_version < "3.11" and extra == "core"
65
+ Requires-Dist: wheel>=0.43.0; extra == "core"
66
+ Requires-Dist: platformdirs>=4.2.2; extra == "core"
67
+ Requires-Dist: jaraco.collections; extra == "core"
68
+ Requires-Dist: jaraco.functools>=4; extra == "core"
69
+ Requires-Dist: packaging; extra == "core"
70
+ Requires-Dist: more_itertools; extra == "core"
71
+ Provides-Extra: check
72
+ Requires-Dist: pytest-checkdocs>=2.4; extra == "check"
73
+ Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check"
74
+ Requires-Dist: ruff>=0.8.0; sys_platform != "cygwin" and extra == "check"
75
+ Provides-Extra: cover
76
+ Requires-Dist: pytest-cov; extra == "cover"
77
+ Provides-Extra: enabler
78
+ Requires-Dist: pytest-enabler>=2.2; extra == "enabler"
79
+ Provides-Extra: type
80
+ Requires-Dist: pytest-mypy; extra == "type"
81
+ Requires-Dist: mypy==1.14.*; extra == "type"
82
+ Requires-Dist: importlib_metadata>=7.0.2; python_version < "3.10" and extra == "type"
83
+ Requires-Dist: jaraco.develop>=7.21; sys_platform != "cygwin" and extra == "type"
84
+
85
+ .. |pypi-version| image:: https://img.shields.io/pypi/v/setuptools.svg
86
+ :target: https://pypi.org/project/setuptools
87
+
88
+ .. |py-version| image:: https://img.shields.io/pypi/pyversions/setuptools.svg
89
+
90
+ .. |test-badge| image:: https://github.com/pypa/setuptools/actions/workflows/main.yml/badge.svg
91
+ :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
92
+ :alt: tests
93
+
94
+ .. |ruff-badge| image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
95
+ :target: https://github.com/astral-sh/ruff
96
+ :alt: Ruff
97
+
98
+ .. |docs-badge| image:: https://img.shields.io/readthedocs/setuptools/latest.svg
99
+ :target: https://setuptools.pypa.io
100
+
101
+ .. |skeleton-badge| image:: https://img.shields.io/badge/skeleton-2024-informational
102
+ :target: https://blog.jaraco.com/skeleton
103
+
104
+ .. |codecov-badge| image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
105
+ :target: https://codecov.io/gh/pypa/setuptools
106
+
107
+ .. |tidelift-badge| image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
108
+ :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
109
+
110
+ .. |discord-badge| image:: https://img.shields.io/discord/803025117553754132
111
+ :target: https://discord.com/channels/803025117553754132/815945031150993468
112
+ :alt: Discord
113
+
114
+ |pypi-version| |py-version| |test-badge| |ruff-badge| |docs-badge| |skeleton-badge| |codecov-badge| |discord-badge|
115
+
116
+ See the `Quickstart <https://setuptools.pypa.io/en/latest/userguide/quickstart.html>`_
117
+ and the `User's Guide <https://setuptools.pypa.io/en/latest/userguide/>`_ for
118
+ instructions on how to use Setuptools.
119
+
120
+ Questions and comments should be directed to `GitHub Discussions
121
+ <https://github.com/pypa/setuptools/discussions>`_.
122
+ Bug reports and especially tested patches may be
123
+ submitted directly to the `bug tracker
124
+ <https://github.com/pypa/setuptools/issues>`_.
125
+
126
+
127
+ Code of Conduct
128
+ ===============
129
+
130
+ Everyone interacting in the setuptools project's codebases, issue trackers,
131
+ chat rooms, and fora is expected to follow the
132
+ `PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
133
+
134
+
135
+ For Enterprise
136
+ ==============
137
+
138
+ Available as part of the Tidelift Subscription.
139
+
140
+ Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
141
+
142
+ `Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LICENSE
2
+ MANIFEST.in
3
+ NEWS.rst
4
+ README.rst
5
+ conftest.py
6
+ exercises.py
7
+ launcher.c
8
+ mypy.ini
9
+ pyproject.toml
10
+ pytest.ini
11
+ setup.cfg
12
+ setup.py
13
+ tox.ini
14
+ _distutils_hack/__init__.py
15
+ _distutils_hack/override.py
16
+ docs/artwork.rst
17
+ docs/build_meta.rst
18
+ docs/conf.py
19
+ docs/history.rst
20
+ docs/index.rst
21
+ docs/pkg_resources.rst
22
+ docs/python 2 sunset.rst
23
+ docs/roadmap.rst
24
+ docs/setuptools.rst
25
+ docs/deprecated/changed_keywords.rst
26
+ docs/deprecated/commands.rst
27
+ docs/deprecated/dependency_links.rst
28
+ docs/deprecated/distutils-legacy.rst
29
+ docs/deprecated/easy_install.rst
30
+ docs/deprecated/functionalities.rst
31
+ docs/deprecated/index.rst
32
+ docs/deprecated/python_eggs.rst
33
+ docs/deprecated/resource_extraction.rst
34
+ docs/deprecated/zip_safe.rst
35
+ docs/deprecated/distutils/_setuptools_disclaimer.rst
36
+ docs/deprecated/distutils/apiref.rst
37
+ docs/deprecated/distutils/builtdist.rst
38
+ docs/deprecated/distutils/commandref.rst
39
+ docs/deprecated/distutils/configfile.rst
40
+ docs/deprecated/distutils/examples.rst
41
+ docs/deprecated/distutils/extending.rst
42
+ docs/deprecated/distutils/index.rst
43
+ docs/deprecated/distutils/introduction.rst
44
+ docs/deprecated/distutils/packageindex.rst
45
+ docs/deprecated/distutils/setupscript.rst
46
+ docs/deprecated/distutils/sourcedist.rst
47
+ docs/deprecated/distutils/uploading.rst
48
+ docs/development/developer-guide.rst
49
+ docs/development/index.rst
50
+ docs/development/releases.rst
51
+ docs/references/keywords.rst
52
+ docs/userguide/datafiles.rst
53
+ docs/userguide/declarative_config.rst
54
+ docs/userguide/dependency_management.rst
55
+ docs/userguide/development_mode.rst
56
+ docs/userguide/distribution.rst
57
+ docs/userguide/entry_point.rst
58
+ docs/userguide/ext_modules.rst
59
+ docs/userguide/extension.rst
60
+ docs/userguide/index.rst
61
+ docs/userguide/miscellaneous.rst
62
+ docs/userguide/package_discovery.rst
63
+ docs/userguide/pyproject_config.rst
64
+ docs/userguide/quickstart.rst
65
+ newsfragments/.gitignore
66
+ newsfragments/README.rst
67
+ pkg_resources/__init__.py
68
+ pkg_resources/api_tests.txt
69
+ pkg_resources/py.typed
70
+ pkg_resources/tests/__init__.py
71
+ pkg_resources/tests/test_find_distributions.py
72
+ pkg_resources/tests/test_integration_zope_interface.py
73
+ pkg_resources/tests/test_markers.py
74
+ pkg_resources/tests/test_pkg_resources.py
75
+ pkg_resources/tests/test_resources.py
76
+ pkg_resources/tests/test_working_set.py
77
+ pkg_resources/tests/data/my-test-package-source/setup.cfg
78
+ pkg_resources/tests/data/my-test-package-source/setup.py
79
+ pkg_resources/tests/data/my-test-package-zip/my-test-package.zip
80
+ pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/PKG-INFO
81
+ pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/SOURCES.txt
82
+ pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/dependency_links.txt
83
+ pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/top_level.txt
84
+ pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/zip-safe
85
+ pkg_resources/tests/data/my-test-package_zipped-egg/my_test_package-1.0-py3.7.egg
86
+ setuptools/__init__.py
87
+ setuptools/_core_metadata.py
88
+ setuptools/_entry_points.py
89
+ setuptools/_imp.py
90
+ setuptools/_importlib.py
91
+ setuptools/_itertools.py
92
+ setuptools/_normalization.py
93
+ setuptools/_path.py
94
+ setuptools/_reqs.py
95
+ setuptools/_shutil.py
96
+ setuptools/_static.py
97
+ setuptools/archive_util.py
98
+ setuptools/build_meta.py
99
+ setuptools/cli-32.exe
100
+ setuptools/cli-64.exe
101
+ setuptools/cli-arm64.exe
102
+ setuptools/cli.exe
103
+ setuptools/depends.py
104
+ setuptools/discovery.py
105
+ setuptools/dist.py
106
+ setuptools/errors.py
107
+ setuptools/extension.py
108
+ setuptools/glob.py
109
+ setuptools/gui-32.exe
110
+ setuptools/gui-64.exe
111
+ setuptools/gui-arm64.exe
112
+ setuptools/gui.exe
113
+ setuptools/installer.py
114
+ setuptools/launch.py
115
+ setuptools/logging.py
116
+ setuptools/modified.py
117
+ setuptools/monkey.py
118
+ setuptools/msvc.py
119
+ setuptools/namespaces.py
120
+ setuptools/package_index.py
121
+ setuptools/sandbox.py
122
+ setuptools/script (dev).tmpl
123
+ setuptools/script.tmpl
124
+ setuptools/unicode_utils.py
125
+ setuptools/version.py
126
+ setuptools/warnings.py
127
+ setuptools/wheel.py
128
+ setuptools/windows_support.py
129
+ setuptools.egg-info/PKG-INFO
130
+ setuptools.egg-info/SOURCES.txt
131
+ setuptools.egg-info/dependency_links.txt
132
+ setuptools.egg-info/entry_points.txt
133
+ setuptools.egg-info/requires.txt
134
+ setuptools.egg-info/top_level.txt
135
+ setuptools/_distutils/__init__.py
136
+ setuptools/_distutils/_log.py
137
+ setuptools/_distutils/_macos_compat.py
138
+ setuptools/_distutils/_modified.py
139
+ setuptools/_distutils/_msvccompiler.py
140
+ setuptools/_distutils/archive_util.py
141
+ setuptools/_distutils/ccompiler.py
142
+ setuptools/_distutils/cmd.py
143
+ setuptools/_distutils/core.py
144
+ setuptools/_distutils/cygwinccompiler.py
145
+ setuptools/_distutils/debug.py
146
+ setuptools/_distutils/dep_util.py
147
+ setuptools/_distutils/dir_util.py
148
+ setuptools/_distutils/dist.py
149
+ setuptools/_distutils/errors.py
150
+ setuptools/_distutils/extension.py
151
+ setuptools/_distutils/fancy_getopt.py
152
+ setuptools/_distutils/file_util.py
153
+ setuptools/_distutils/filelist.py
154
+ setuptools/_distutils/log.py
155
+ setuptools/_distutils/spawn.py
156
+ setuptools/_distutils/sysconfig.py
157
+ setuptools/_distutils/text_file.py
158
+ setuptools/_distutils/unixccompiler.py
159
+ setuptools/_distutils/util.py
160
+ setuptools/_distutils/version.py
161
+ setuptools/_distutils/versionpredicate.py
162
+ setuptools/_distutils/zosccompiler.py
163
+ setuptools/_distutils/command/__init__.py
164
+ setuptools/_distutils/command/_framework_compat.py
165
+ setuptools/_distutils/command/bdist.py
166
+ setuptools/_distutils/command/bdist_dumb.py
167
+ setuptools/_distutils/command/bdist_rpm.py
168
+ setuptools/_distutils/command/build.py
169
+ setuptools/_distutils/command/build_clib.py
170
+ setuptools/_distutils/command/build_ext.py
171
+ setuptools/_distutils/command/build_py.py
172
+ setuptools/_distutils/command/build_scripts.py
173
+ setuptools/_distutils/command/check.py
174
+ setuptools/_distutils/command/clean.py
175
+ setuptools/_distutils/command/config.py
176
+ setuptools/_distutils/command/install.py
177
+ setuptools/_distutils/command/install_data.py
178
+ setuptools/_distutils/command/install_egg_info.py
179
+ setuptools/_distutils/command/install_headers.py
180
+ setuptools/_distutils/command/install_lib.py
181
+ setuptools/_distutils/command/install_scripts.py
182
+ setuptools/_distutils/command/sdist.py
183
+ setuptools/_distutils/compat/__init__.py
184
+ setuptools/_distutils/compat/py39.py
185
+ setuptools/_distutils/tests/__init__.py
186
+ setuptools/_distutils/tests/support.py
187
+ setuptools/_distutils/tests/test_archive_util.py
188
+ setuptools/_distutils/tests/test_bdist.py
189
+ setuptools/_distutils/tests/test_bdist_dumb.py
190
+ setuptools/_distutils/tests/test_bdist_rpm.py
191
+ setuptools/_distutils/tests/test_build.py
192
+ setuptools/_distutils/tests/test_build_clib.py
193
+ setuptools/_distutils/tests/test_build_ext.py
194
+ setuptools/_distutils/tests/test_build_py.py
195
+ setuptools/_distutils/tests/test_build_scripts.py
196
+ setuptools/_distutils/tests/test_ccompiler.py
197
+ setuptools/_distutils/tests/test_check.py
198
+ setuptools/_distutils/tests/test_clean.py
199
+ setuptools/_distutils/tests/test_cmd.py
200
+ setuptools/_distutils/tests/test_config_cmd.py
201
+ setuptools/_distutils/tests/test_core.py
202
+ setuptools/_distutils/tests/test_cygwinccompiler.py
203
+ setuptools/_distutils/tests/test_dir_util.py
204
+ setuptools/_distutils/tests/test_dist.py
205
+ setuptools/_distutils/tests/test_extension.py
206
+ setuptools/_distutils/tests/test_file_util.py
207
+ setuptools/_distutils/tests/test_filelist.py
208
+ setuptools/_distutils/tests/test_install.py
209
+ setuptools/_distutils/tests/test_install_data.py
210
+ setuptools/_distutils/tests/test_install_headers.py
211
+ setuptools/_distutils/tests/test_install_lib.py
212
+ setuptools/_distutils/tests/test_install_scripts.py
213
+ setuptools/_distutils/tests/test_log.py
214
+ setuptools/_distutils/tests/test_mingwccompiler.py
215
+ setuptools/_distutils/tests/test_modified.py
216
+ setuptools/_distutils/tests/test_msvccompiler.py
217
+ setuptools/_distutils/tests/test_sdist.py
218
+ setuptools/_distutils/tests/test_spawn.py
219
+ setuptools/_distutils/tests/test_sysconfig.py
220
+ setuptools/_distutils/tests/test_text_file.py
221
+ setuptools/_distutils/tests/test_unixccompiler.py
222
+ setuptools/_distutils/tests/test_util.py
223
+ setuptools/_distutils/tests/test_version.py
224
+ setuptools/_distutils/tests/test_versionpredicate.py
225
+ setuptools/_distutils/tests/unix_compat.py
226
+ setuptools/_distutils/tests/compat/__init__.py
227
+ setuptools/_distutils/tests/compat/py39.py
228
+ setuptools/_vendor/ruff.toml
229
+ setuptools/_vendor/typing_extensions.py
230
+ setuptools/_vendor/autocommand/__init__.py
231
+ setuptools/_vendor/autocommand/autoasync.py
232
+ setuptools/_vendor/autocommand/autocommand.py
233
+ setuptools/_vendor/autocommand/automain.py
234
+ setuptools/_vendor/autocommand/autoparse.py
235
+ setuptools/_vendor/autocommand/errors.py
236
+ setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER
237
+ setuptools/_vendor/autocommand-2.2.2.dist-info/LICENSE
238
+ setuptools/_vendor/autocommand-2.2.2.dist-info/METADATA
239
+ setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD
240
+ setuptools/_vendor/autocommand-2.2.2.dist-info/WHEEL
241
+ setuptools/_vendor/autocommand-2.2.2.dist-info/top_level.txt
242
+ setuptools/_vendor/backports/__init__.py
243
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER
244
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/LICENSE
245
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/METADATA
246
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD
247
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/REQUESTED
248
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/WHEEL
249
+ setuptools/_vendor/backports.tarfile-1.2.0.dist-info/top_level.txt
250
+ setuptools/_vendor/backports/tarfile/__init__.py
251
+ setuptools/_vendor/backports/tarfile/__main__.py
252
+ setuptools/_vendor/backports/tarfile/compat/__init__.py
253
+ setuptools/_vendor/backports/tarfile/compat/py38.py
254
+ setuptools/_vendor/importlib_metadata/__init__.py
255
+ setuptools/_vendor/importlib_metadata/_adapters.py
256
+ setuptools/_vendor/importlib_metadata/_collections.py
257
+ setuptools/_vendor/importlib_metadata/_compat.py
258
+ setuptools/_vendor/importlib_metadata/_functools.py
259
+ setuptools/_vendor/importlib_metadata/_itertools.py
260
+ setuptools/_vendor/importlib_metadata/_meta.py
261
+ setuptools/_vendor/importlib_metadata/_text.py
262
+ setuptools/_vendor/importlib_metadata/diagnose.py
263
+ setuptools/_vendor/importlib_metadata/py.typed
264
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/INSTALLER
265
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/LICENSE
266
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/METADATA
267
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/RECORD
268
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/REQUESTED
269
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/WHEEL
270
+ setuptools/_vendor/importlib_metadata-8.0.0.dist-info/top_level.txt
271
+ setuptools/_vendor/importlib_metadata/compat/__init__.py
272
+ setuptools/_vendor/importlib_metadata/compat/py311.py
273
+ setuptools/_vendor/importlib_metadata/compat/py39.py
274
+ setuptools/_vendor/inflect/__init__.py
275
+ setuptools/_vendor/inflect/py.typed
276
+ setuptools/_vendor/inflect-7.3.1.dist-info/INSTALLER
277
+ setuptools/_vendor/inflect-7.3.1.dist-info/LICENSE
278
+ setuptools/_vendor/inflect-7.3.1.dist-info/METADATA
279
+ setuptools/_vendor/inflect-7.3.1.dist-info/RECORD
280
+ setuptools/_vendor/inflect-7.3.1.dist-info/WHEEL
281
+ setuptools/_vendor/inflect-7.3.1.dist-info/top_level.txt
282
+ setuptools/_vendor/inflect/compat/__init__.py
283
+ setuptools/_vendor/inflect/compat/py38.py
284
+ setuptools/_vendor/jaraco/context.py
285
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/INSTALLER
286
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/LICENSE
287
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/METADATA
288
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/RECORD
289
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/REQUESTED
290
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/WHEEL
291
+ setuptools/_vendor/jaraco.collections-5.1.0.dist-info/top_level.txt
292
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/INSTALLER
293
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/LICENSE
294
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/METADATA
295
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/RECORD
296
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/WHEEL
297
+ setuptools/_vendor/jaraco.context-5.3.0.dist-info/top_level.txt
298
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/INSTALLER
299
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/LICENSE
300
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/METADATA
301
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/RECORD
302
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/WHEEL
303
+ setuptools/_vendor/jaraco.functools-4.0.1.dist-info/top_level.txt
304
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/INSTALLER
305
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/LICENSE
306
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/METADATA
307
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/RECORD
308
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/REQUESTED
309
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/WHEEL
310
+ setuptools/_vendor/jaraco.text-3.12.1.dist-info/top_level.txt
311
+ setuptools/_vendor/jaraco/collections/__init__.py
312
+ setuptools/_vendor/jaraco/collections/py.typed
313
+ setuptools/_vendor/jaraco/functools/__init__.py
314
+ setuptools/_vendor/jaraco/functools/__init__.pyi
315
+ setuptools/_vendor/jaraco/functools/py.typed
316
+ setuptools/_vendor/jaraco/text/Lorem ipsum.txt
317
+ setuptools/_vendor/jaraco/text/__init__.py
318
+ setuptools/_vendor/jaraco/text/layouts.py
319
+ setuptools/_vendor/jaraco/text/show-newlines.py
320
+ setuptools/_vendor/jaraco/text/strip-prefix.py
321
+ setuptools/_vendor/jaraco/text/to-dvorak.py
322
+ setuptools/_vendor/jaraco/text/to-qwerty.py
323
+ setuptools/_vendor/more_itertools/__init__.py
324
+ setuptools/_vendor/more_itertools/__init__.pyi
325
+ setuptools/_vendor/more_itertools/more.py
326
+ setuptools/_vendor/more_itertools/more.pyi
327
+ setuptools/_vendor/more_itertools/py.typed
328
+ setuptools/_vendor/more_itertools/recipes.py
329
+ setuptools/_vendor/more_itertools/recipes.pyi
330
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/INSTALLER
331
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/LICENSE
332
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/METADATA
333
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/RECORD
334
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/REQUESTED
335
+ setuptools/_vendor/more_itertools-10.3.0.dist-info/WHEEL
336
+ setuptools/_vendor/packaging/__init__.py
337
+ setuptools/_vendor/packaging/_elffile.py
338
+ setuptools/_vendor/packaging/_manylinux.py
339
+ setuptools/_vendor/packaging/_musllinux.py
340
+ setuptools/_vendor/packaging/_parser.py
341
+ setuptools/_vendor/packaging/_structures.py
342
+ setuptools/_vendor/packaging/_tokenizer.py
343
+ setuptools/_vendor/packaging/markers.py
344
+ setuptools/_vendor/packaging/metadata.py
345
+ setuptools/_vendor/packaging/py.typed
346
+ setuptools/_vendor/packaging/requirements.py
347
+ setuptools/_vendor/packaging/specifiers.py
348
+ setuptools/_vendor/packaging/tags.py
349
+ setuptools/_vendor/packaging/utils.py
350
+ setuptools/_vendor/packaging/version.py
351
+ setuptools/_vendor/packaging-24.2.dist-info/INSTALLER
352
+ setuptools/_vendor/packaging-24.2.dist-info/LICENSE
353
+ setuptools/_vendor/packaging-24.2.dist-info/LICENSE.APACHE
354
+ setuptools/_vendor/packaging-24.2.dist-info/LICENSE.BSD
355
+ setuptools/_vendor/packaging-24.2.dist-info/METADATA
356
+ setuptools/_vendor/packaging-24.2.dist-info/RECORD
357
+ setuptools/_vendor/packaging-24.2.dist-info/REQUESTED
358
+ setuptools/_vendor/packaging-24.2.dist-info/WHEEL
359
+ setuptools/_vendor/packaging/licenses/__init__.py
360
+ setuptools/_vendor/packaging/licenses/_spdx.py
361
+ setuptools/_vendor/platformdirs/__init__.py
362
+ setuptools/_vendor/platformdirs/__main__.py
363
+ setuptools/_vendor/platformdirs/android.py
364
+ setuptools/_vendor/platformdirs/api.py
365
+ setuptools/_vendor/platformdirs/macos.py
366
+ setuptools/_vendor/platformdirs/py.typed
367
+ setuptools/_vendor/platformdirs/unix.py
368
+ setuptools/_vendor/platformdirs/version.py
369
+ setuptools/_vendor/platformdirs/windows.py
370
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/INSTALLER
371
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/METADATA
372
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/RECORD
373
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/REQUESTED
374
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/WHEEL
375
+ setuptools/_vendor/platformdirs-4.2.2.dist-info/licenses/LICENSE
376
+ setuptools/_vendor/tomli/__init__.py
377
+ setuptools/_vendor/tomli/_parser.py
378
+ setuptools/_vendor/tomli/_re.py
379
+ setuptools/_vendor/tomli/_types.py
380
+ setuptools/_vendor/tomli/py.typed
381
+ setuptools/_vendor/tomli-2.0.1.dist-info/INSTALLER
382
+ setuptools/_vendor/tomli-2.0.1.dist-info/LICENSE
383
+ setuptools/_vendor/tomli-2.0.1.dist-info/METADATA
384
+ setuptools/_vendor/tomli-2.0.1.dist-info/RECORD
385
+ setuptools/_vendor/tomli-2.0.1.dist-info/REQUESTED
386
+ setuptools/_vendor/tomli-2.0.1.dist-info/WHEEL
387
+ setuptools/_vendor/typeguard/__init__.py
388
+ setuptools/_vendor/typeguard/_checkers.py
389
+ setuptools/_vendor/typeguard/_config.py
390
+ setuptools/_vendor/typeguard/_decorators.py
391
+ setuptools/_vendor/typeguard/_exceptions.py
392
+ setuptools/_vendor/typeguard/_functions.py
393
+ setuptools/_vendor/typeguard/_importhook.py
394
+ setuptools/_vendor/typeguard/_memo.py
395
+ setuptools/_vendor/typeguard/_pytest_plugin.py
396
+ setuptools/_vendor/typeguard/_suppression.py
397
+ setuptools/_vendor/typeguard/_transformer.py
398
+ setuptools/_vendor/typeguard/_union_transformer.py
399
+ setuptools/_vendor/typeguard/_utils.py
400
+ setuptools/_vendor/typeguard/py.typed
401
+ setuptools/_vendor/typeguard-4.3.0.dist-info/INSTALLER
402
+ setuptools/_vendor/typeguard-4.3.0.dist-info/LICENSE
403
+ setuptools/_vendor/typeguard-4.3.0.dist-info/METADATA
404
+ setuptools/_vendor/typeguard-4.3.0.dist-info/RECORD
405
+ setuptools/_vendor/typeguard-4.3.0.dist-info/WHEEL
406
+ setuptools/_vendor/typeguard-4.3.0.dist-info/entry_points.txt
407
+ setuptools/_vendor/typeguard-4.3.0.dist-info/top_level.txt
408
+ setuptools/_vendor/typing_extensions-4.12.2.dist-info/INSTALLER
409
+ setuptools/_vendor/typing_extensions-4.12.2.dist-info/LICENSE
410
+ setuptools/_vendor/typing_extensions-4.12.2.dist-info/METADATA
411
+ setuptools/_vendor/typing_extensions-4.12.2.dist-info/RECORD
412
+ setuptools/_vendor/typing_extensions-4.12.2.dist-info/WHEEL
413
+ setuptools/_vendor/wheel/__init__.py
414
+ setuptools/_vendor/wheel/__main__.py
415
+ setuptools/_vendor/wheel/_setuptools_logging.py
416
+ setuptools/_vendor/wheel/bdist_wheel.py
417
+ setuptools/_vendor/wheel/macosx_libfile.py
418
+ setuptools/_vendor/wheel/metadata.py
419
+ setuptools/_vendor/wheel/util.py
420
+ setuptools/_vendor/wheel/wheelfile.py
421
+ setuptools/_vendor/wheel-0.43.0.dist-info/INSTALLER
422
+ setuptools/_vendor/wheel-0.43.0.dist-info/LICENSE.txt
423
+ setuptools/_vendor/wheel-0.43.0.dist-info/METADATA
424
+ setuptools/_vendor/wheel-0.43.0.dist-info/RECORD
425
+ setuptools/_vendor/wheel-0.43.0.dist-info/REQUESTED
426
+ setuptools/_vendor/wheel-0.43.0.dist-info/WHEEL
427
+ setuptools/_vendor/wheel-0.43.0.dist-info/entry_points.txt
428
+ setuptools/_vendor/wheel/cli/__init__.py
429
+ setuptools/_vendor/wheel/cli/convert.py
430
+ setuptools/_vendor/wheel/cli/pack.py
431
+ setuptools/_vendor/wheel/cli/tags.py
432
+ setuptools/_vendor/wheel/cli/unpack.py
433
+ setuptools/_vendor/wheel/vendored/__init__.py
434
+ setuptools/_vendor/wheel/vendored/vendor.txt
435
+ setuptools/_vendor/wheel/vendored/packaging/__init__.py
436
+ setuptools/_vendor/wheel/vendored/packaging/_elffile.py
437
+ setuptools/_vendor/wheel/vendored/packaging/_manylinux.py
438
+ setuptools/_vendor/wheel/vendored/packaging/_musllinux.py
439
+ setuptools/_vendor/wheel/vendored/packaging/_parser.py
440
+ setuptools/_vendor/wheel/vendored/packaging/_structures.py
441
+ setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py
442
+ setuptools/_vendor/wheel/vendored/packaging/markers.py
443
+ setuptools/_vendor/wheel/vendored/packaging/requirements.py
444
+ setuptools/_vendor/wheel/vendored/packaging/specifiers.py
445
+ setuptools/_vendor/wheel/vendored/packaging/tags.py
446
+ setuptools/_vendor/wheel/vendored/packaging/utils.py
447
+ setuptools/_vendor/wheel/vendored/packaging/version.py
448
+ setuptools/_vendor/zipp/__init__.py
449
+ setuptools/_vendor/zipp/glob.py
450
+ setuptools/_vendor/zipp-3.19.2.dist-info/INSTALLER
451
+ setuptools/_vendor/zipp-3.19.2.dist-info/LICENSE
452
+ setuptools/_vendor/zipp-3.19.2.dist-info/METADATA
453
+ setuptools/_vendor/zipp-3.19.2.dist-info/RECORD
454
+ setuptools/_vendor/zipp-3.19.2.dist-info/REQUESTED
455
+ setuptools/_vendor/zipp-3.19.2.dist-info/WHEEL
456
+ setuptools/_vendor/zipp-3.19.2.dist-info/top_level.txt
457
+ setuptools/_vendor/zipp/compat/__init__.py
458
+ setuptools/_vendor/zipp/compat/py310.py
459
+ setuptools/command/__init__.py
460
+ setuptools/command/_requirestxt.py
461
+ setuptools/command/alias.py
462
+ setuptools/command/bdist_egg.py
463
+ setuptools/command/bdist_rpm.py
464
+ setuptools/command/bdist_wheel.py
465
+ setuptools/command/build.py
466
+ setuptools/command/build_clib.py
467
+ setuptools/command/build_ext.py
468
+ setuptools/command/build_py.py
469
+ setuptools/command/develop.py
470
+ setuptools/command/dist_info.py
471
+ setuptools/command/easy_install.py
472
+ setuptools/command/editable_wheel.py
473
+ setuptools/command/egg_info.py
474
+ setuptools/command/install.py
475
+ setuptools/command/install_egg_info.py
476
+ setuptools/command/install_lib.py
477
+ setuptools/command/install_scripts.py
478
+ setuptools/command/launcher manifest.xml
479
+ setuptools/command/rotate.py
480
+ setuptools/command/saveopts.py
481
+ setuptools/command/sdist.py
482
+ setuptools/command/setopt.py
483
+ setuptools/command/test.py
484
+ setuptools/compat/__init__.py
485
+ setuptools/compat/py310.py
486
+ setuptools/compat/py311.py
487
+ setuptools/compat/py312.py
488
+ setuptools/compat/py39.py
489
+ setuptools/config/NOTICE
490
+ setuptools/config/__init__.py
491
+ setuptools/config/_apply_pyprojecttoml.py
492
+ setuptools/config/distutils.schema.json
493
+ setuptools/config/expand.py
494
+ setuptools/config/pyprojecttoml.py
495
+ setuptools/config/setupcfg.py
496
+ setuptools/config/setuptools.schema.json
497
+ setuptools/config/_validate_pyproject/NOTICE
498
+ setuptools/config/_validate_pyproject/__init__.py
499
+ setuptools/config/_validate_pyproject/error_reporting.py
500
+ setuptools/config/_validate_pyproject/extra_validations.py
501
+ setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py
502
+ setuptools/config/_validate_pyproject/fastjsonschema_validations.py
503
+ setuptools/config/_validate_pyproject/formats.py
504
+ setuptools/tests/__init__.py
505
+ setuptools/tests/contexts.py
506
+ setuptools/tests/environment.py
507
+ setuptools/tests/fixtures.py
508
+ setuptools/tests/mod_with_constant.py
509
+ setuptools/tests/namespaces.py
510
+ setuptools/tests/script-with-bom.py
511
+ setuptools/tests/server.py
512
+ setuptools/tests/test_archive_util.py
513
+ setuptools/tests/test_bdist_deprecations.py
514
+ setuptools/tests/test_bdist_egg.py
515
+ setuptools/tests/test_bdist_wheel.py
516
+ setuptools/tests/test_build.py
517
+ setuptools/tests/test_build_clib.py
518
+ setuptools/tests/test_build_ext.py
519
+ setuptools/tests/test_build_meta.py
520
+ setuptools/tests/test_build_py.py
521
+ setuptools/tests/test_config_discovery.py
522
+ setuptools/tests/test_core_metadata.py
523
+ setuptools/tests/test_depends.py
524
+ setuptools/tests/test_develop.py
525
+ setuptools/tests/test_dist.py
526
+ setuptools/tests/test_dist_info.py
527
+ setuptools/tests/test_distutils_adoption.py
528
+ setuptools/tests/test_easy_install.py
529
+ setuptools/tests/test_editable_install.py
530
+ setuptools/tests/test_egg_info.py
531
+ setuptools/tests/test_extern.py
532
+ setuptools/tests/test_find_packages.py
533
+ setuptools/tests/test_find_py_modules.py
534
+ setuptools/tests/test_glob.py
535
+ setuptools/tests/test_install_scripts.py
536
+ setuptools/tests/test_logging.py
537
+ setuptools/tests/test_manifest.py
538
+ setuptools/tests/test_namespaces.py
539
+ setuptools/tests/test_packageindex.py
540
+ setuptools/tests/test_sandbox.py
541
+ setuptools/tests/test_sdist.py
542
+ setuptools/tests/test_setopt.py
543
+ setuptools/tests/test_setuptools.py
544
+ setuptools/tests/test_shutil_wrapper.py
545
+ setuptools/tests/test_unicode_utils.py
546
+ setuptools/tests/test_virtualenv.py
547
+ setuptools/tests/test_warnings.py
548
+ setuptools/tests/test_wheel.py
549
+ setuptools/tests/test_windows_wrappers.py
550
+ setuptools/tests/text.py
551
+ setuptools/tests/textwrap.py
552
+ setuptools/tests/compat/__init__.py
553
+ setuptools/tests/compat/py39.py
554
+ setuptools/tests/config/__init__.py
555
+ setuptools/tests/config/setupcfg_examples.txt
556
+ setuptools/tests/config/test_apply_pyprojecttoml.py
557
+ setuptools/tests/config/test_expand.py
558
+ setuptools/tests/config/test_pyprojecttoml.py
559
+ setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py
560
+ setuptools/tests/config/test_setupcfg.py
561
+ setuptools/tests/config/downloads/__init__.py
562
+ setuptools/tests/config/downloads/preload.py
563
+ setuptools/tests/indexes/test_links_priority/external.html
564
+ setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
565
+ setuptools/tests/integration/__init__.py
566
+ setuptools/tests/integration/helpers.py
567
+ setuptools/tests/integration/test_pip_install_sdist.py
568
+ tools/build_launchers.py
569
+ tools/finalize.py
570
+ tools/generate_validation_code.py
571
+ tools/vendored.py
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/entry_points.txt ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [distutils.commands]
2
+ alias = setuptools.command.alias:alias
3
+ bdist_egg = setuptools.command.bdist_egg:bdist_egg
4
+ bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
5
+ bdist_wheel = setuptools.command.bdist_wheel:bdist_wheel
6
+ build = setuptools.command.build:build
7
+ build_clib = setuptools.command.build_clib:build_clib
8
+ build_ext = setuptools.command.build_ext:build_ext
9
+ build_py = setuptools.command.build_py:build_py
10
+ develop = setuptools.command.develop:develop
11
+ dist_info = setuptools.command.dist_info:dist_info
12
+ easy_install = setuptools.command.easy_install:easy_install
13
+ editable_wheel = setuptools.command.editable_wheel:editable_wheel
14
+ egg_info = setuptools.command.egg_info:egg_info
15
+ install = setuptools.command.install:install
16
+ install_egg_info = setuptools.command.install_egg_info:install_egg_info
17
+ install_lib = setuptools.command.install_lib:install_lib
18
+ install_scripts = setuptools.command.install_scripts:install_scripts
19
+ rotate = setuptools.command.rotate:rotate
20
+ saveopts = setuptools.command.saveopts:saveopts
21
+ sdist = setuptools.command.sdist:sdist
22
+ setopt = setuptools.command.setopt:setopt
23
+
24
+ [distutils.setup_keywords]
25
+ dependency_links = setuptools.dist:assert_string_list
26
+ eager_resources = setuptools.dist:assert_string_list
27
+ entry_points = setuptools.dist:check_entry_points
28
+ exclude_package_data = setuptools.dist:check_package_data
29
+ extras_require = setuptools.dist:check_extras
30
+ include_package_data = setuptools.dist:assert_bool
31
+ install_requires = setuptools.dist:check_requirements
32
+ namespace_packages = setuptools.dist:check_nsp
33
+ package_data = setuptools.dist:check_package_data
34
+ packages = setuptools.dist:check_packages
35
+ python_requires = setuptools.dist:check_specifier
36
+ setup_requires = setuptools.dist:check_requirements
37
+ use_2to3 = setuptools.dist:invalid_unless_false
38
+ zip_safe = setuptools.dist:assert_bool
39
+
40
+ [egg_info.writers]
41
+ PKG-INFO = setuptools.command.egg_info:write_pkg_info
42
+ dependency_links.txt = setuptools.command.egg_info:overwrite_arg
43
+ eager_resources.txt = setuptools.command.egg_info:overwrite_arg
44
+ entry_points.txt = setuptools.command.egg_info:write_entries
45
+ namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
46
+ requires.txt = setuptools.command.egg_info:write_requirements
47
+ top_level.txt = setuptools.command.egg_info:write_toplevel_names
48
+
49
+ [setuptools.finalize_distribution_options]
50
+ keywords = setuptools.dist:Distribution._finalize_setup_keywords
51
+ parent_finalize = setuptools.dist:_Distribution.finalize_options
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ [certs]
3
+
4
+ [check]
5
+ pytest-checkdocs>=2.4
6
+
7
+ [check:sys_platform != "cygwin"]
8
+ pytest-ruff>=0.2.1
9
+ ruff>=0.8.0
10
+
11
+ [core]
12
+ packaging>=24.2
13
+ more_itertools>=8.8
14
+ jaraco.text>=3.7
15
+ wheel>=0.43.0
16
+ platformdirs>=4.2.2
17
+ jaraco.collections
18
+ jaraco.functools>=4
19
+ packaging
20
+ more_itertools
21
+
22
+ [core:python_version < "3.10"]
23
+ importlib_metadata>=6
24
+
25
+ [core:python_version < "3.11"]
26
+ tomli>=2.0.1
27
+
28
+ [cover]
29
+ pytest-cov
30
+
31
+ [doc]
32
+ sphinx>=3.5
33
+ jaraco.packaging>=9.3
34
+ rst.linker>=1.9
35
+ furo
36
+ sphinx-lint
37
+ jaraco.tidelift>=1.4
38
+ pygments-github-lexers==0.0.5
39
+ sphinx-favicon
40
+ sphinx-inline-tabs
41
+ sphinx-reredirects
42
+ sphinxcontrib-towncrier
43
+ sphinx-notfound-page<2,>=1
44
+ pyproject-hooks!=1.1
45
+ towncrier<24.7
46
+
47
+ [enabler]
48
+ pytest-enabler>=2.2
49
+
50
+ [ssl]
51
+
52
+ [test]
53
+ pytest!=8.1.*,>=6
54
+ virtualenv>=13.0.0
55
+ wheel>=0.44.0
56
+ pip>=19.1
57
+ packaging>=24.2
58
+ jaraco.envs>=2.2
59
+ pytest-xdist>=3
60
+ jaraco.path>=3.7.2
61
+ build[virtualenv]>=1.0.3
62
+ filelock>=3.4.0
63
+ ini2toml[lite]>=0.14
64
+ tomli-w>=1.0.0
65
+ pytest-timeout
66
+ pytest-home>=0.5
67
+ pytest-subprocess
68
+ pyproject-hooks!=1.1
69
+ jaraco.test>=5.5
70
+
71
+ [test:python_version >= "3.9" and sys_platform != "cygwin"]
72
+ jaraco.develop>=7.21
73
+
74
+ [test:sys_platform != "cygwin"]
75
+ pytest-perf
76
+
77
+ [type]
78
+ pytest-mypy
79
+ mypy==1.14.*
80
+
81
+ [type:python_version < "3.10"]
82
+ importlib_metadata>=7.0.2
83
+
84
+ [type:sys_platform != "cygwin"]
85
+ jaraco.develop>=7.21
parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ _distutils_hack
2
+ pkg_resources
3
+ setuptools
parrot/lib/python3.10/site-packages/shellingham/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ import os
3
+
4
+ from ._core import ShellDetectionFailure
5
+
6
+ __version__ = "1.5.4"
7
+
8
+
9
+ def detect_shell(pid=None, max_depth=10):
10
+ name = os.name
11
+ try:
12
+ impl = importlib.import_module(".{}".format(name), __name__)
13
+ except ImportError:
14
+ message = "Shell detection not implemented for {0!r}".format(name)
15
+ raise RuntimeError(message)
16
+ try:
17
+ get_shell = impl.get_shell
18
+ except AttributeError:
19
+ raise RuntimeError("get_shell not implemented for {0!r}".format(name))
20
+ shell = get_shell(pid, max_depth=max_depth)
21
+ if shell:
22
+ return shell
23
+ raise ShellDetectionFailure()
parrot/lib/python3.10/site-packages/shellingham/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (798 Bytes). View file
 
parrot/lib/python3.10/site-packages/shellingham/__pycache__/_core.cpython-310.pyc ADDED
Binary file (528 Bytes). View file