diff --git a/parrot/lib/python3.10/site-packages/httpx-0.24.0.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/httpx-0.24.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..58d0071fa27bfd50ba94568078e97b68dee31558
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/httpx-0.24.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.14.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/parrot/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f7ab16a9750fc8bffc5bf7d4576f906aa73f254
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/idna/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc b/parrot/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..812706092e4661961e609d38fde3be8b9b4126ae
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2492dce9f6295ae4fff013bf270e5637cf4d07f8
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/idna/__pycache__/core.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc b/parrot/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5cdec5f18f0b89b81800536bf9af01163ea85c7
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/matplotlib-3.9.2.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/matplotlib-3.9.2.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ec51537db27dd4d9c9ed3cd39fd96485f3cfddea
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/matplotlib-3.9.2.dist-info/LICENSE
@@ -0,0 +1,99 @@
+License agreement for matplotlib versions 1.3.0 and later
+=========================================================
+
+1. This LICENSE AGREEMENT is between the Matplotlib Development Team
+("MDT"), and the Individual or Organization ("Licensee") accessing and
+otherwise using matplotlib software in source or binary form and its
+associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, MDT
+hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+to reproduce, analyze, test, perform and/or display publicly, prepare
+derivative works, distribute, and otherwise use matplotlib
+alone or in any derivative version, provided, however, that MDT's
+License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
+2012- Matplotlib Development Team; All Rights Reserved" are retained in
+matplotlib alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or
+incorporates matplotlib or any part thereof, and wants to
+make the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to matplotlib.
+
+4. MDT is making matplotlib available to Licensee on an "AS
+IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
+WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+MATPLOTLIB, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between MDT and
+Licensee. This License Agreement does not grant permission to use MDT
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib,
+Licensee agrees to be bound by the terms and conditions of this License
+Agreement.
+
+License agreement for matplotlib versions prior to 1.3.0
+========================================================
+
+1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
+Individual or Organization ("Licensee") accessing and otherwise using
+matplotlib software in source or binary form and its associated
+documentation.
+
+2. Subject to the terms and conditions of this License Agreement, JDH
+hereby grants Licensee a nonexclusive, royalty-free, world-wide license
+to reproduce, analyze, test, perform and/or display publicly, prepare
+derivative works, distribute, and otherwise use matplotlib
+alone or in any derivative version, provided, however, that JDH's
+License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
+2002-2011 John D. Hunter; All Rights Reserved" are retained in
+matplotlib alone or in any derivative version prepared by
+Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on or
+incorporates matplotlib or any part thereof, and wants to
+make the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to matplotlib.
+
+4. JDH is making matplotlib available to Licensee on an "AS
+IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
+WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
+LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
+MATPLOTLIB, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
+THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between JDH and
+Licensee. This License Agreement does not grant permission to use JDH
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using matplotlib,
+Licensee agrees to be bound by the terms and conditions of this License
+Agreement.
\ No newline at end of file
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__init__.py b/parrot/lib/python3.10/site-packages/mistral_inference/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e8d9f94621c6b29efab723e119a73a0dbe15089
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/__init__.py
@@ -0,0 +1 @@
+__version__ = "1.4.0"
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1f6e9c082cfa67a1db6180bebc50a9895899e07
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/args.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/args.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d908f7b44156e4834a56ddab432f6273c023953
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/args.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/cache.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..960980260f395877181813a645021c79677f4825
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/cache.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/generate.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/generate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f678b7a5a7171c9c9a850396f799bf8d85f2431
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/generate.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/lora.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/lora.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..863dc2460afeb41379dc0b010105daf6cd49a3d7
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/lora.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/main.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/main.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75961feb3620eba07790c1a7db724bce92fde8a2
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/main.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/mamba.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/mamba.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cb1d9d1168528d870913fc19807cc8f1d94aa19
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/mamba.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/model.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5df143259940714843cd82fbde912979b09d86b
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/model.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/moe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/moe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53adf2648ab9ee9a78e57f86c28467f6cc10ab9f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/moe.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/rope.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/rope.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..666c64488c7e2819f45e789042a2703dc73e6b1f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/rope.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9243618dce52e7cc05bcc3384cdea8967cc8b91b
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer_layers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer_layers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..626429338685a3c9cb7f7e4d5346adaf6e82e9a5
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/transformer_layers.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/vision_encoder.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/vision_encoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..481116202df56c70ee5caf0d21b21ad21e2b215c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mistral_inference/__pycache__/vision_encoder.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/args.py b/parrot/lib/python3.10/site-packages/mistral_inference/args.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94a2c605977cda5323230a924a83e53adbc51bd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/args.py
@@ -0,0 +1,64 @@
+from dataclasses import dataclass
+from typing import Optional
+
+from simple_parsing.helpers import Serializable
+
+from mistral_inference.lora import LoraArgs
+from mistral_inference.moe import MoeArgs
+
+
+@dataclass
+class VisionEncoderArgs:
+ hidden_size: int
+ num_channels: int
+ image_size: int
+ patch_size: int
+ intermediate_size: int
+ num_hidden_layers: int
+ num_attention_heads: int
+ rope_theta: float = 1e4 # for rope-2D
+ image_token_id: int = 10
+
+
+@dataclass
+class TransformerArgs(Serializable):
+ dim: int
+ n_layers: int
+ head_dim: int
+ hidden_dim: int
+ n_heads: int
+ n_kv_heads: int
+ norm_eps: float
+ vocab_size: int
+
+ max_batch_size: int = 0
+
+ # For rotary embeddings. If not set, will be inferred
+ rope_theta: Optional[float] = None
+ # If this is set, we will use MoE layers instead of dense layers.
+ moe: Optional[MoeArgs] = None
+ # If this is set, we will load LoRA linear layers instead of linear layers.
+ lora: Optional[LoraArgs] = None
+ model_type: str = "transformer"
+
+ vision_encoder: Optional[VisionEncoderArgs] = None
+
+ def __post_init__(self) -> None:
+ assert self.model_type == "transformer", self.model_type
+
+
+@dataclass
+class MambaArgs(Serializable):
+ dim: int
+ n_layers: int
+ vocab_size: int
+ n_groups: int
+ rms_norm: bool
+ residual_in_fp32: bool
+ fused_add_norm: bool
+ pad_vocab_size_multiple: int
+ tie_embeddings: bool
+ model_type: str = "mamba"
+
+ def __post_init__(self) -> None:
+ assert self.model_type == "mamba", self.model_type
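
Note: these dataclasses mirror a checkpoint's params.json; Transformer.from_folder and Mamba.from_folder (later in this diff) build them with simple_parsing's Serializable.from_dict. A minimal sketch with illustrative values, not a real checkpoint config:

    from mistral_inference.args import TransformerArgs

    args = TransformerArgs.from_dict(
        {
            "dim": 512,
            "n_layers": 4,
            "head_dim": 64,
            "hidden_dim": 2048,
            "n_heads": 8,
            "n_kv_heads": 2,
            "norm_eps": 1e-5,
            "vocab_size": 32000,
        }
    )
    assert args.model_type == "transformer"  # enforced in __post_init__
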
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/cache.py b/parrot/lib/python3.10/site-packages/mistral_inference/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..93cfb1c102a4cb2199d408866a12ed75a919f4f9
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/cache.py
@@ -0,0 +1,201 @@
+from dataclasses import dataclass
+from typing import List, Optional, Tuple
+
+import torch
+from xformers.ops.fmha.attn_bias import ( # type: ignore
+ AttentionBias,
+ BlockDiagonalCausalMask,
+ BlockDiagonalCausalWithOffsetPaddedKeysMask,
+ BlockDiagonalMask,
+)
+
+
+@dataclass
+class CacheInputMetadata:
+ # rope absolute positions
+ positions: torch.Tensor
+ # where tokens should go in the cache
+ cache_positions: torch.Tensor
+
+ # if prefill, use block diagonal causal mask
+ # else use causal with padded key mask
+ prefill: bool
+ mask: AttentionBias
+ seqlens: List[int]
+
+
+def interleave_list(l1: List[torch.Tensor], l2: List[torch.Tensor]) -> List[torch.Tensor]:
+ assert len(l1) == len(l2)
+ return [v for pair in zip(l1, l2) for v in pair]
+
+
+class CacheView:
+ def __init__(
+ self,
+ cache_k: torch.Tensor,
+ cache_v: torch.Tensor,
+ metadata: CacheInputMetadata,
+ kv_seqlens: torch.Tensor,
+ ):
+ self.cache_k = cache_k
+ self.cache_v = cache_v
+ self.kv_seqlens = kv_seqlens
+ self.metadata = metadata
+
+ def update(self, xk: torch.Tensor, xv: torch.Tensor) -> None:
+ """
+ to_cache_mask masks the last [max_seq_len] tokens in each sequence
+ """
+ n_kv_heads, head_dim = self.cache_k.shape[-2:]
+ flat_cache_k = self.cache_k.view(-1, n_kv_heads, head_dim)
+ flat_cache_v = self.cache_v.view(-1, n_kv_heads, head_dim)
+
+ flat_cache_k.index_copy_(0, self.metadata.cache_positions, xk)
+ flat_cache_v.index_copy_(0, self.metadata.cache_positions, xv)
+
+ def interleave_kv(self, xk: torch.Tensor, xv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ This is a naive implementation and not optimized for speed.
+ """
+ assert xk.ndim == xv.ndim == 3 # (B * T, H, D)
+ assert xk.shape == xv.shape
+
+ if all([s == 0 for s in self.metadata.seqlens]):
+ # No cache to interleave
+ return xk, xv
+
+ # Make it a list of [(T, H, D)]
+ xk: Tuple[torch.Tensor] = torch.split(xk, self.metadata.seqlens) # type: ignore
+ xv: Tuple[torch.Tensor] = torch.split(xv, self.metadata.seqlens) # type: ignore
+ assert len(xk) == len(self.kv_seqlens), f"Batch size is {len(self.kv_seqlens)}, got {len(xk)}"
+
+ # Retrieve cache
+ cache_k = [cache_k[:seq_len] for cache_k, seq_len in zip(self.cache_k, self.kv_seqlens)]
+ cache_v = [cache_v[:seq_len] for cache_v, seq_len in zip(self.cache_v, self.kv_seqlens)]
+
+ interleaved_k = interleave_list(cache_k, list(xk))
+ interleaved_v = interleave_list(cache_v, list(xv))
+
+ return torch.cat(interleaved_k, dim=0), torch.cat(interleaved_v, dim=0)
+
+ @property
+ def max_seq_len(self) -> int:
+ return self.cache_k.shape[1]
+
+ @property
+ def key(self) -> torch.Tensor:
+ return self.cache_k[: len(self.kv_seqlens)]
+
+ @property
+ def value(self) -> torch.Tensor:
+ return self.cache_v[: len(self.kv_seqlens)]
+
+ @property
+ def prefill(self) -> bool:
+ return self.metadata.prefill
+
+ @property
+ def mask(self) -> AttentionBias:
+ return self.metadata.mask
+
+
+class BufferCache:
+ """
+    Example buffer cache that supports variable-length sequences.
+    The allocated cache is rectangular, which is wasteful (see PagedAttention for better mechanisms).
+ """
+
+ def __init__(
+ self,
+ n_layers: int,
+ max_batch_size: int,
+ max_seq_len: int,
+ n_kv_heads: int,
+ head_dim: int,
+ ):
+ self.max_seq_len = max_seq_len
+ self.n_kv_heads = n_kv_heads
+ self.head_dim = head_dim
+
+ self.cache_k = torch.empty((n_layers, max_batch_size, max_seq_len, n_kv_heads, head_dim))
+ self.cache_v = torch.empty((n_layers, max_batch_size, max_seq_len, n_kv_heads, head_dim))
+ # holds the valid length for each batch element in the cache
+ self.kv_seqlens: Optional[torch.Tensor] = None
+
+ def get_view(self, layer_id: int, metadata: CacheInputMetadata) -> CacheView:
+ assert self.kv_seqlens is not None
+ return CacheView(self.cache_k[layer_id], self.cache_v[layer_id], metadata, self.kv_seqlens)
+
+ def reset(self) -> None:
+ self.kv_seqlens = None
+
+ def init_kvseqlens(self, batch_size: int) -> None:
+ self.kv_seqlens = torch.zeros((batch_size,), device=self.device, dtype=torch.long)
+
+ @property
+ def device(self) -> torch.device:
+ return self.cache_k.device
+
+ def to(self, device: torch.device, dtype: torch.dtype) -> "BufferCache":
+ self.cache_k = self.cache_k.to(device=device, dtype=dtype)
+ self.cache_v = self.cache_v.to(device=device, dtype=dtype)
+
+ return self
+
+ def update_seqlens(self, seqlens: List[int]) -> None:
+ assert self.kv_seqlens is not None
+ self.kv_seqlens += torch.tensor(seqlens, device=self.device, dtype=torch.long)
+
+ def get_input_metadata(self, seqlens: List[int]) -> CacheInputMetadata:
+ """
+ Get metadata about cache positions
+ """
+ if self.kv_seqlens is None:
+ self.init_kvseqlens(len(seqlens))
+
+ assert isinstance(self.kv_seqlens, torch.Tensor)
+ assert len(seqlens) == len(
+ self.kv_seqlens
+ ), f"Batch size is {len(self.kv_seqlens)}, got {len(seqlens)}, did you forget to reset cache?"
+ seqpos = self.kv_seqlens.tolist()
+
+ assert len(seqlens) > 0, seqlens
+ cached_elements = torch.tensor(seqlens, device=self.device, dtype=torch.long)
+
+ positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)]).to(
+ device=self.device, dtype=torch.long
+ )
+
+ batch_idx = torch.tensor(
+ sum([[i] * seqlen for i, seqlen in enumerate(seqlens)], []),
+ device=self.device,
+ dtype=torch.long,
+ )
+ cache_positions = positions + batch_idx * self.max_seq_len
+
+ first_prefill = seqpos[0] == 0
+ subsequent_prefill = any(seqlen > 1 for seqlen in seqlens)
+ if first_prefill:
+ assert all([pos == 0 for pos in seqpos]), seqpos
+ mask = BlockDiagonalCausalMask.from_seqlens(seqlens).make_local_attention(self.max_seq_len)
+ elif subsequent_prefill:
+ mask = BlockDiagonalMask.from_seqlens(
+ q_seqlen=seqlens,
+ kv_seqlen=[
+ s + cached_s.clamp(max=self.max_seq_len).item() for (s, cached_s) in zip(seqlens, self.kv_seqlens)
+ ],
+ ).make_local_attention_from_bottomright(self.max_seq_len)
+ else:
+ mask = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
+ q_seqlen=seqlens,
+ kv_padding=self.max_seq_len,
+ kv_seqlen=(self.kv_seqlens + cached_elements).clamp(max=self.max_seq_len).tolist(),
+ )
+
+ return CacheInputMetadata(
+ positions=positions,
+ cache_positions=cache_positions,
+ prefill=first_prefill or subsequent_prefill,
+ mask=mask,
+ seqlens=seqlens,
+ )
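
The cache above is rectangular, so get_input_metadata maps each (batch, position) pair to a flat index with positions + batch_idx * max_seq_len. A small worked sketch of that arithmetic with toy sizes:

    import torch

    max_seq_len = 8
    seqpos, seqlens = [3, 0], [2, 3]  # tokens already cached / new tokens per sequence
    positions = torch.cat([torch.arange(p, p + n) for p, n in zip(seqpos, seqlens)])
    batch_idx = torch.tensor(sum([[i] * n for i, n in enumerate(seqlens)], []))
    cache_positions = positions + batch_idx * max_seq_len
    assert cache_positions.tolist() == [3, 4, 8, 9, 10]
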
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/generate.py b/parrot/lib/python3.10/site-packages/mistral_inference/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e906b3c136f259bce0a6f7b8ec1704a86797bda
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/generate.py
@@ -0,0 +1,169 @@
+from typing import List, Optional, Tuple
+
+import numpy as np
+import torch
+
+from mistral_inference.cache import BufferCache
+from mistral_inference.mamba import Mamba
+from mistral_inference.transformer import Transformer
+
+
+@torch.inference_mode()
+def generate_mamba(
+ encoded_prompts: List[List[int]],
+ model: Mamba,
+ *,
+ max_tokens: int,
+ temperature: float,
+ chunk_size: Optional[int] = None,
+ eos_id: Optional[int] = None,
+) -> Tuple[List[List[int]], List[List[float]]]:
+ input_ids = torch.tensor(encoded_prompts, device=model.device)
+ output = model.model.generate(
+ input_ids=input_ids,
+ max_length=input_ids.shape[-1] + max_tokens,
+ cg=True,
+ return_dict_in_generate=True,
+ output_scores=True,
+ enable_timing=False,
+ eos_token_id=eos_id,
+ temperature=temperature,
+ top_p=0.8,
+ )
+ generated_tokens = output.sequences[:, input_ids.shape[-1] :].tolist()
+
+ _logprobs: List[List[float]] = [[] for _ in range(len(generated_tokens))]
+ for seq_idx, batch_score in enumerate(output.scores):
+ for batch_idx, score in enumerate(batch_score.tolist()):
+ _logprobs[batch_idx].append(score[generated_tokens[batch_idx][seq_idx]])
+
+ return generated_tokens, _logprobs
+
+
+@torch.inference_mode()
+def generate(
+ encoded_prompts: List[List[int]],
+ model: Transformer,
+ images: List[List[np.ndarray]] = [],
+ *,
+ max_tokens: int,
+ temperature: float,
+ chunk_size: Optional[int] = None,
+ eos_id: Optional[int] = None,
+) -> Tuple[List[List[int]], List[List[float]]]:
+ images_torch: List[List[torch.Tensor]] = []
+ if images:
+ assert chunk_size is None
+ images_torch = [
+ [torch.tensor(im, device=model.device, dtype=model.dtype) for im in images_for_sample]
+ for images_for_sample in images
+ ]
+
+ model = model.eval()
+ B, V = len(encoded_prompts), model.args.vocab_size
+
+ seqlens = [len(x) for x in encoded_prompts]
+
+ # Cache
+ cache_window = max(seqlens) + max_tokens
+ cache = BufferCache(
+ model.n_local_layers,
+ model.args.max_batch_size,
+ cache_window,
+ model.args.n_kv_heads,
+ model.args.head_dim,
+ )
+ cache.to(device=model.device, dtype=model.dtype)
+ cache.reset()
+
+ # Bookkeeping
+ logprobs: List[List[float]] = [[] for _ in range(B)]
+ last_token_prelogits = None
+
+ # One chunk if size not specified
+ max_prompt_len = max(seqlens)
+ if chunk_size is None:
+ chunk_size = max_prompt_len
+
+ flattened_images: List[torch.Tensor] = sum(images_torch, [])
+
+ # Encode prompt by chunks
+ for s in range(0, max_prompt_len, chunk_size):
+ prompt_chunks = [p[s : s + chunk_size] for p in encoded_prompts]
+ assert all(len(p) > 0 for p in prompt_chunks)
+ prelogits = model.forward(
+ torch.tensor(sum(prompt_chunks, []), device=model.device, dtype=torch.long),
+ images=flattened_images,
+ seqlens=[len(p) for p in prompt_chunks],
+ cache=cache,
+ )
+ logits = torch.log_softmax(prelogits, dim=-1)
+
+ if last_token_prelogits is not None:
+ # Pass > 1
+ last_token_logits = torch.log_softmax(last_token_prelogits, dim=-1)
+ for i_seq in range(B):
+ logprobs[i_seq].append(last_token_logits[i_seq, prompt_chunks[i_seq][0]].item())
+
+ offset = 0
+ for i_seq, sequence in enumerate(prompt_chunks):
+ logprobs[i_seq].extend([logits[offset + i, sequence[i + 1]].item() for i in range(len(sequence) - 1)])
+ offset += len(sequence)
+
+ last_token_prelogits = prelogits.index_select(
+ 0,
+ torch.tensor([len(p) for p in prompt_chunks], device=prelogits.device).cumsum(dim=0) - 1,
+ )
+ assert last_token_prelogits.shape == (B, V)
+
+ # decode
+ generated_tensors = []
+ is_finished = torch.tensor([False for _ in range(B)])
+
+ assert last_token_prelogits is not None
+ for _ in range(max_tokens):
+ next_token = sample(last_token_prelogits, temperature=temperature, top_p=0.8)
+
+ if eos_id is not None:
+ is_finished = is_finished | (next_token == eos_id).cpu()
+
+ if is_finished.all():
+ break
+
+ last_token_logits = torch.log_softmax(last_token_prelogits, dim=-1)
+ for i in range(B):
+ logprobs[i].append(last_token_logits[i, next_token[i]].item())
+
+ generated_tensors.append(next_token[:, None])
+ last_token_prelogits = model.forward(next_token, seqlens=[1] * B, cache=cache)
+ assert last_token_prelogits.shape == (B, V)
+
+ generated_tokens: List[List[int]]
+ if generated_tensors:
+ generated_tokens = torch.cat(generated_tensors, 1).tolist()
+ else:
+ generated_tokens = []
+
+ return generated_tokens, logprobs
+
+
+def sample(logits: torch.Tensor, temperature: float, top_p: float) -> torch.Tensor:
+ if temperature > 0:
+ probs = torch.softmax(logits / temperature, dim=-1)
+ next_token = sample_top_p(probs, top_p)
+ else:
+ next_token = torch.argmax(logits, dim=-1).unsqueeze(0)
+
+ return next_token.reshape(-1)
+
+
+def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
+ assert 0 <= p <= 1
+
+ probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
+ probs_sum = torch.cumsum(probs_sort, dim=-1)
+ mask = probs_sum - probs_sort > p
+ probs_sort[mask] = 0.0
+ probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
+ next_token = torch.multinomial(probs_sort, num_samples=1)
+ return torch.gather(probs_idx, -1, next_token)
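
sample_top_p keeps every token whose preceding cumulative probability mass is at most p, renormalizes the survivors, and samples one of them. A small sketch with a toy distribution:

    import torch

    from mistral_inference.generate import sample_top_p

    torch.manual_seed(0)
    probs = torch.tensor([[0.6, 0.3, 0.08, 0.02]])  # already sorted here for clarity
    # cumulative mass before each token: 0.0, 0.6, 0.9, 0.98 -> with p = 0.8,
    # tokens 2 and 3 are masked and [0.6, 0.3] is renormalized to [2/3, 1/3]
    token = sample_top_p(probs, p=0.8)
    assert token.item() in (0, 1)
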
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/lora.py b/parrot/lib/python3.10/site-packages/mistral_inference/lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..30924290038d4a7d9803000032f620d955a01dd5
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/lora.py
@@ -0,0 +1,154 @@
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Dict, NamedTuple, Union
+
+import safetensors.torch
+import torch
+import torch.nn as nn
+from simple_parsing.helpers import Serializable
+
+
+@dataclass
+class LoraArgs(Serializable):
+ rank: int
+ scaling: float
+
+ def __post_init__(self) -> None:
+ assert self.rank > 0
+ assert self.scaling > 0.0
+
+
+class LoRALinear(nn.Module):
+ """
+ Implementation of:
+ - LoRA: https://arxiv.org/abs/2106.09685
+
+ Notes:
+ - Freezing is handled at network level, not layer level.
+ - Scaling factor controls relative importance of LoRA skip
+ connection versus original frozen weight. General guidance is
+ to keep it to 2.0 and sweep over learning rate when changing
+ the rank.
+ """
+
+ def __init__(
+ self,
+ in_features: int,
+ out_features: int,
+ rank: int,
+ scaling: float,
+ bias: bool = False,
+ ):
+ super().__init__()
+
+ self.in_features = in_features
+ self.out_features = out_features
+ assert not bias
+ self.bias = bias
+ self.rank = rank
+ self.scaling = scaling
+
+ self.lora_A = nn.Linear(
+ self.in_features,
+ self.rank,
+ bias=self.bias,
+ )
+ self.lora_B = nn.Linear(
+ self.rank,
+ self.out_features,
+ bias=self.bias,
+ )
+
+ self.linear = nn.Linear(self.in_features, self.out_features, bias=self.bias)
+
+ # make sure no LoRA weights are marked as "missing" in load_state_dict
+ def ignore_missing_keys(m: nn.Module, incompatible_keys: NamedTuple) -> None:
+ incompatible_keys.missing_keys[:] = [] # type: ignore
+
+ self.register_load_state_dict_post_hook(ignore_missing_keys)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ lora = self.lora_B(self.lora_A(x))
+ result: torch.Tensor = self.linear(x) + lora * self.scaling
+ return result
+
+ def _load_from_state_dict(self, state_dict: Dict[str, Any], prefix: str, *args, **kwargs) -> None: # type: ignore[no-untyped-def]
+ key_name = prefix + "weight"
+
+ # full checkpoint
+ if key_name in state_dict:
+ w_ref = state_dict[key_name]
+
+ # load frozen weights
+ state_dict = {
+ "linear.weight": w_ref,
+ "lora_A.weight": torch.zeros_like(self.lora_A.weight, device=w_ref.device, dtype=w_ref.dtype),
+ "lora_B.weight": torch.zeros_like(self.lora_B.weight, device=w_ref.device, dtype=w_ref.dtype),
+ }
+ self.load_state_dict(state_dict, assign=True, strict=True)
+
+
+class LoRALoaderMixin:
+ def load_lora(self, lora_path: Union[Path, str], scaling: float = 2.0) -> None:
+ """Loads LoRA checkpoint"""
+
+ lora_path = Path(lora_path)
+ assert lora_path.is_file(), f"{lora_path} does not exist or is not a file"
+
+ state_dict = safetensors.torch.load_file(lora_path)
+
+ self._load_lora_state_dict(state_dict, scaling=scaling)
+
+ def _load_lora_state_dict(self, lora_state_dict: Dict[str, torch.Tensor], scaling: float = 2.0) -> None:
+ """Loads LoRA state_dict"""
+ lora_dtypes = set([p.dtype for p in lora_state_dict.values()])
+ assert (
+ len(lora_dtypes) == 1
+ ), f"LoRA weights have multiple different dtypes {lora_dtypes}. All weights need to have the same dtype"
+ lora_dtype = lora_dtypes.pop()
+ assert lora_dtype == self.dtype, f"LoRA weights dtype differs from model's dtype {lora_dtype} != {self.dtype}" # type: ignore[attr-defined]
+ assert all("lora" in key for key in lora_state_dict.keys())
+
+ # move tensors to device
+ lora_state_dict = {k: v.to(self.device) for k, v in lora_state_dict.items()} # type: ignore[attr-defined]
+
+ state_dict = self.state_dict() # type: ignore[attr-defined]
+
+ if self.args.lora is None: # type: ignore[attr-defined]
+ logging.info("Loading and merging LoRA weights...")
+
+ # replace every nn.Linear with a LoRALinear with 'meta' device except the output layer
+ named_modules = dict(self.named_modules()) # type: ignore[attr-defined]
+ for name, module in named_modules.items():
+ if isinstance(module, nn.Linear) and name != "output":
+ layer_id = name.split(".")[1]
+ if layer_id not in self.layers: # type: ignore[attr-defined]
+ logging.debug(
+ "Skipping parameter %s at pipeline rank %d",
+ name,
+ self.pipeline_rank, # type: ignore[attr-defined]
+ )
+ elif (name + ".lora_B.weight") in lora_state_dict:
+ weight = (
+ module.weight
+ + (lora_state_dict[name + ".lora_B.weight"] @ lora_state_dict[name + ".lora_A.weight"])
+ * scaling
+ )
+
+ state_dict[name + ".weight"] = weight
+ else:
+ logging.info("Loading LoRA weights...")
+        for k, v in lora_state_dict.items():
+            # only keep LoRA keys for layers owned by this pipeline rank
+ layer_id = k.split(".")[1]
+ if layer_id in self.layers: # type: ignore[attr-defined]
+ state_dict[k] = v
+ else:
+ logging.debug(
+ "Skipping parameter %s at pipeline rank %d",
+ k,
+ self.pipeline_rank, # type: ignore[attr-defined]
+ )
+
+ self.load_state_dict(state_dict, strict=True) # type: ignore[attr-defined]
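
When the model itself was built without LoRA layers (args.lora is None), the branch above folds each adapter into the frozen weight as W + scaling * (B @ A). A self-contained sketch of that merge, with illustrative shapes:

    import torch

    out_features, in_features, rank, scaling = 6, 4, 2, 2.0
    W = torch.randn(out_features, in_features)  # frozen linear weight
    A = torch.randn(rank, in_features)          # lora_A.weight
    B = torch.randn(out_features, rank)         # lora_B.weight

    merged = W + (B @ A) * scaling
    x = torch.randn(in_features)
    # the merged weight reproduces the frozen path plus the scaled low-rank path
    assert torch.allclose(merged @ x, W @ x + (B @ (A @ x)) * scaling, atol=1e-5)
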
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/main.py b/parrot/lib/python3.10/site-packages/mistral_inference/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..28b8f7a72a922be6dc5a983c80a72aaf0e7ec433
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/main.py
@@ -0,0 +1,276 @@
+import json
+import logging
+import os
+import warnings
+from pathlib import Path
+from typing import List, Optional, Tuple, Type, Union
+
+import fire # type: ignore
+import torch
+import torch.distributed as dist
+from mistral_common.protocol.instruct.messages import (
+ AssistantMessage,
+ ContentChunk,
+ ImageChunk,
+ ImageURLChunk,
+ TextChunk,
+ UserMessage,
+)
+from mistral_common.protocol.instruct.request import ChatCompletionRequest
+from mistral_common.tokens.tokenizers.base import Tokenizer
+from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
+from mistral_common.tokens.tokenizers.sentencepiece import is_sentencepiece
+from mistral_common.tokens.tokenizers.tekken import (
+ SpecialTokenPolicy,
+ Tekkenizer,
+ is_tekken,
+)
+from PIL import Image
+
+from mistral_inference.args import TransformerArgs
+from mistral_inference.generate import generate, generate_mamba
+from mistral_inference.mamba import Mamba
+from mistral_inference.transformer import Transformer
+
+
+def is_torchrun() -> bool:
+ required_vars = ["MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE"]
+ return all(var in os.environ for var in required_vars)
+
+
+def load_tokenizer(model_path: Path) -> MistralTokenizer:
+ tokenizer = [f for f in os.listdir(model_path) if is_tekken(model_path / f) or is_sentencepiece(model_path / f)]
+ assert (
+ len(tokenizer) > 0
+ ), f"No tokenizer in {model_path}, place a `tokenizer.model.[v1,v2,v3]` or `tekken.json` file in {model_path}."
+ assert (
+ len(tokenizer) == 1
+ ), f"Multiple tokenizers {', '.join(tokenizer)} found in `model_path`, make sure to only have one tokenizer"
+
+ mistral_tokenizer = MistralTokenizer.from_file(str(model_path / tokenizer[0]))
+
+ if isinstance(mistral_tokenizer.instruct_tokenizer.tokenizer, Tekkenizer):
+ mistral_tokenizer.instruct_tokenizer.tokenizer.special_token_policy = SpecialTokenPolicy.KEEP
+
+ logging.info(f"Loaded tokenizer of type {mistral_tokenizer.instruct_tokenizer.__class__}")
+
+ return mistral_tokenizer
+
+
+def get_model_cls(model_path: str) -> Union[Type[Mamba], Type[Transformer]]:
+ with open(Path(model_path) / "params.json", "r") as f:
+ args_dict = json.load(f)
+
+ return {"mamba": Mamba, "transformer": Transformer}[args_dict.get("model_type", "transformer")] # type: ignore[return-value]
+
+
+def pad_and_convert_to_tensor(list_of_lists: List[List[int]], pad_id: int) -> List[List[int]]:
+ # Determine the length of the longest list
+ max_len = max(len(lst) for lst in list_of_lists)
+
+ # Left pad each list to the maximum length
+ padded_lists = [[pad_id] * (max_len - len(lst)) + lst for lst in list_of_lists]
+
+ return padded_lists
+
+
+def _get_multimodal_input() -> Tuple[UserMessage, bool]:
+ chunks: List[ContentChunk] = []
+
+ response = input("Text prompt: ")
+ if response:
+ chunks.append(TextChunk(text=response))
+
+ print("[You can input zero, one or more images now.]")
+ while True:
+ did_something = False
+ response = input("Image path or url [Leave empty and press enter to finish image input]: ")
+ if response:
+ if Path(response).is_file():
+ chunks.append(ImageChunk(image=Image.open(response)))
+ else:
+ assert response.startswith("http"), f"{response} does not seem to be a valid url."
+ chunks.append(ImageURLChunk(image_url=response))
+ did_something = True
+
+ if not did_something:
+ break
+
+ return UserMessage(content=chunks), not chunks
+
+
+def interactive(
+ model_path: str,
+ max_tokens: int = 35,
+ temperature: float = 0.7,
+ num_pipeline_ranks: int = 1,
+ instruct: bool = False,
+ lora_path: Optional[str] = None,
+) -> None:
+ if is_torchrun():
+ torch.distributed.init_process_group()
+ torch.cuda.set_device(torch.distributed.get_rank())
+ should_print = torch.distributed.get_rank() == 0
+
+ num_pipeline_ranks = torch.distributed.get_world_size()
+ else:
+ should_print = True
+ num_pipeline_ranks = 1
+
+ mistral_tokenizer: MistralTokenizer = load_tokenizer(Path(model_path))
+ tokenizer: Tokenizer = mistral_tokenizer.instruct_tokenizer.tokenizer
+
+ model_cls = get_model_cls(model_path)
+ model = model_cls.from_folder(Path(model_path), max_batch_size=3, num_pipeline_ranks=num_pipeline_ranks)
+ is_multimodal = isinstance(model.args, TransformerArgs) and model.args.vision_encoder is not None
+
+ if is_multimodal:
+ assert instruct, "Multimodal models should only be used in instruct mode"
+
+ # load LoRA
+ if lora_path is not None:
+ model.load_lora(Path(lora_path))
+
+ prompt: str = ""
+ messages: List[UserMessage | AssistantMessage] = []
+
+ while True:
+ if should_print:
+ if not is_multimodal:
+ user_input = input("Prompt: ")
+
+ if instruct:
+ if is_multimodal:
+ mm_input, finished = _get_multimodal_input()
+ if finished:
+ break
+ messages += [mm_input]
+ else:
+ messages += [UserMessage(content=user_input)]
+ chat_completion_request = ChatCompletionRequest(messages=messages)
+
+ tokenized = mistral_tokenizer.encode_chat_completion(chat_completion_request)
+ tokens = tokenized.tokens
+ images = tokenized.images
+ else:
+ prompt += user_input
+
+ tokens = tokenizer.encode(prompt, bos=True, eos=False)
+ images = []
+
+ length_tensor = torch.tensor([len(tokens)], dtype=torch.int)
+ else:
+ length_tensor = torch.tensor([0], dtype=torch.int)
+
+ if is_torchrun():
+ dist.broadcast(length_tensor, src=0)
+
+        if not should_print:
+            tokens = int(length_tensor.item()) * [0]
+            images = []
+
+        generate_fn = generate if isinstance(model, Transformer) else generate_mamba
+        # generate_mamba takes no images argument, so only pass them for Transformers
+        images_kwarg = {"images": [images]} if isinstance(model, Transformer) else {}
+        generated_tokens, _ = generate_fn(  # type: ignore[operator]
+            [tokens],
+            model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            eos_id=tokenizer.eos_id,
+            **images_kwarg,
+        )
+
+ answer = tokenizer.decode(generated_tokens[0])
+
+ if should_print:
+ print(answer)
+ print("=====================")
+
+ if instruct:
+ messages += [AssistantMessage(content=answer)]
+ else:
+ prompt += answer
+
+
+def demo(
+ model_path: str,
+ max_tokens: int = 35,
+ temperature: float = 0,
+ lora_path: Optional[str] = None,
+) -> None:
+ if is_torchrun():
+ torch.distributed.init_process_group()
+ torch.cuda.set_device(torch.distributed.get_rank())
+ should_print = torch.distributed.get_rank() == 0
+
+ num_pipeline_ranks = torch.distributed.get_world_size()
+ else:
+ should_print = True
+ num_pipeline_ranks = 1
+
+ model_cls = get_model_cls(model_path)
+ model = model_cls.from_folder(Path(model_path), max_batch_size=3, num_pipeline_ranks=num_pipeline_ranks)
+ # load LoRA
+ if lora_path is not None:
+ model.load_lora(Path(lora_path))
+
+ mistral_tokenizer: MistralTokenizer = load_tokenizer(Path(model_path))
+ tokenizer: Tokenizer = mistral_tokenizer.instruct_tokenizer.tokenizer
+
+ prompts = [
+ "This is a test",
+ "This is another great test",
+ "This is a third test, mistral AI is very good at testing. ",
+ ]
+
+ encoded_prompts = [tokenizer.encode(prompt, bos=True, eos=False) for prompt in prompts]
+
+ if isinstance(model, Transformer):
+ generate_fn = generate
+ else:
+ generate_fn = generate_mamba # type: ignore[assignment]
+ warnings.warn(
+ "Batched generation is not correctly supported at the moment and therefore might lead to worse results "
+ "as compared to non-batched generation. "
+ "See https://github.com/state-spaces/mamba/issues/66#issuecomment-1862349718 for more information."
+ )
+ encoded_prompts = pad_and_convert_to_tensor(encoded_prompts, mistral_tokenizer.instruct_tokenizer.BOS) # type: ignore[attr-defined]
+
+ generated_tokens, _logprobs = generate_fn(
+ encoded_prompts,
+ model, # type: ignore[arg-type]
+ max_tokens=max_tokens,
+ temperature=temperature,
+ eos_id=tokenizer.eos_id,
+ )
+
+ generated_words = []
+ for i, x in enumerate(generated_tokens):
+ generated_words.append(tokenizer.decode(encoded_prompts[i] + x))
+
+ res = generated_words
+
+ if should_print:
+ for w, logprob in zip(res, _logprobs):
+ print(w)
+ logging.debug("Logprobs: %s", logprob)
+ print("=====================")
+
+
+def mistral_chat() -> None:
+ fire.Fire(interactive)
+
+
+def mistral_demo() -> None:
+ fire.Fire(demo)
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.INFO)
+ fire.Fire(
+ {
+ "interactive": interactive,
+ "demo": demo,
+ }
+ )
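
Despite its name, pad_and_convert_to_tensor above only left-pads and returns plain lists; the tensor conversion happens later, inside generate_mamba. A quick sketch of its behavior (assuming the package and its dependencies are installed):

    from mistral_inference.main import pad_and_convert_to_tensor

    assert pad_and_convert_to_tensor([[5, 6, 7], [9]], pad_id=0) == [[5, 6, 7], [0, 0, 9]]
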
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/mamba.py b/parrot/lib/python3.10/site-packages/mistral_inference/mamba.py
new file mode 100644
index 0000000000000000000000000000000000000000..02745e3d27183e0b6a6b942e235b6c19a10d7c11
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/mamba.py
@@ -0,0 +1,83 @@
+import json
+from pathlib import Path
+from typing import List, Optional, Union
+
+import safetensors
+import torch
+import torch.nn as nn
+
+from mistral_inference.args import MambaArgs
+from mistral_inference.cache import BufferCache
+from mistral_inference.model import ModelBase
+
+_is_mamba_installed = False
+try:
+ from mamba_ssm.models.config_mamba import MambaConfig
+ from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
+
+ _is_mamba_installed = True
+except ImportError:
+ _is_mamba_installed = False
+
+
+class Mamba(ModelBase, nn.Module):
+ def __init__(self, args: MambaArgs):
+ super().__init__()
+ self.args = args
+ assert _is_mamba_installed, "Mamba is not installed. Please install it using `pip install mamba-ssm`."
+
+ # make sure naming is consistent with `mamba_ssm`
+ config = MambaConfig(
+ d_model=args.dim,
+ n_layer=args.n_layers,
+ vocab_size=args.vocab_size,
+ ssm_cfg={"ngroups": args.n_groups, "layer": "Mamba2"},
+ attn_layer_idx=[],
+ attn_cfg={},
+ rms_norm=args.rms_norm,
+ residual_in_fp32=args.residual_in_fp32,
+ fused_add_norm=args.fused_add_norm,
+ pad_vocab_size_multiple=args.pad_vocab_size_multiple,
+ tie_embeddings=args.tie_embeddings,
+ )
+ self.model = MambaLMHeadModel(config)
+
+ @property
+ def dtype(self) -> torch.dtype:
+ return next(self.parameters()).dtype
+
+ @property
+ def device(self) -> torch.device:
+ return next(self.parameters()).device
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ seqlens: List[int], # not supported for now
+ cache: Optional[BufferCache] = None, # not supported for now
+ ) -> torch.Tensor:
+ lm_output = self.model(input_ids)
+ result: torch.Tensor = lm_output.logits
+ return result
+
+ @staticmethod
+ def from_folder(
+ folder: Union[Path, str],
+ max_batch_size: int = 1,
+ num_pipeline_ranks: int = 1,
+ device: Union[torch.device, str] = "cuda",
+ dtype: Optional[torch.dtype] = None,
+ ) -> "Mamba":
+ with open(Path(folder) / "params.json", "r") as f:
+ model_args = MambaArgs.from_dict(json.load(f))
+
+ with torch.device("meta"):
+ model = Mamba(model_args)
+
+ model_file = Path(folder) / "consolidated.safetensors"
+
+ assert model_file.exists(), f"Make sure {model_file} exists."
+ loaded = safetensors.torch.load_file(str(model_file))
+
+ model.load_state_dict(loaded, assign=True, strict=True)
+ return model.to(device=device, dtype=dtype)
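
from_folder above instantiates the model under torch.device("meta"), so no real memory is allocated, then load_state_dict(..., assign=True) adopts the checkpoint tensors directly. A minimal sketch of that pattern (assumes PyTorch >= 2.1 for assign=True):

    import torch
    import torch.nn as nn

    with torch.device("meta"):
        layer = nn.Linear(4, 4)  # parameters are shape-only placeholders
    assert layer.weight.is_meta

    state = {"weight": torch.zeros(4, 4), "bias": torch.zeros(4)}
    layer.load_state_dict(state, assign=True)  # adopt the real tensors in place
    assert not layer.weight.is_meta
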
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/model.py b/parrot/lib/python3.10/site-packages/mistral_inference/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..be41fcdbe8117f3dd0a4504d6c4ce08e460037d0
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/model.py
@@ -0,0 +1,43 @@
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import List, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from mistral_inference.cache import BufferCache
+
+
+class ModelBase(nn.Module, ABC):
+ def __init__(self) -> None:
+ super().__init__()
+
+ @property
+ @abstractmethod
+ def dtype(self) -> torch.dtype:
+ pass
+
+ @property
+ @abstractmethod
+ def device(self) -> torch.device:
+ pass
+
+ @abstractmethod
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ seqlens: List[int], # not supported for now
+ cache: Optional[BufferCache] = None, # not supported for now
+ ) -> torch.Tensor:
+ pass
+
+ @staticmethod
+ @abstractmethod
+ def from_folder(
+ folder: Union[Path, str],
+ max_batch_size: int = 1,
+ num_pipeline_ranks: int = 1,
+ device: Union[torch.device, str] = "cuda",
+ dtype: Optional[torch.dtype] = None,
+ ) -> "ModelBase":
+ pass
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/moe.py b/parrot/lib/python3.10/site-packages/mistral_inference/moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ce8a8a9c62e9a8edff797c0262b393c30ea38e7
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/moe.py
@@ -0,0 +1,32 @@
+import dataclasses
+from typing import List
+
+import torch
+import torch.nn.functional as F
+from simple_parsing.helpers import Serializable
+from torch import nn
+
+
+@dataclasses.dataclass
+class MoeArgs(Serializable):
+ num_experts: int
+ num_experts_per_tok: int
+
+
+class MoeLayer(nn.Module):
+ def __init__(self, experts: List[nn.Module], gate: nn.Module, moe_args: MoeArgs):
+ super().__init__()
+ assert len(experts) > 0
+ self.experts = nn.ModuleList(experts)
+ self.gate = gate
+ self.args = moe_args
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ gate_logits = self.gate(inputs)
+ weights, selected_experts = torch.topk(gate_logits, self.args.num_experts_per_tok)
+ weights = F.softmax(weights, dim=1, dtype=torch.float).to(inputs.dtype)
+ results = torch.zeros_like(inputs)
+ for i, expert in enumerate(self.experts):
+ batch_idx, nth_expert = torch.where(selected_experts == i)
+ results[batch_idx] += weights[batch_idx, nth_expert, None] * expert(inputs[batch_idx])
+ return results
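
A numeric sketch of the routing math in MoeLayer.forward: per token, take the top-k gate logits, softmax only over those k, and mix the chosen experts' outputs with the resulting weights.

    import torch
    import torch.nn.functional as F

    gate_logits = torch.tensor([[2.0, 0.0, 1.0]])     # one token, three experts
    weights, selected = torch.topk(gate_logits, k=2)  # picks logits 2.0 and 1.0
    weights = F.softmax(weights, dim=1, dtype=torch.float)
    assert selected.tolist() == [[0, 2]]
    # softmax over the two selected logits: e^2 / (e^2 + e^1) ~= 0.731, rest ~= 0.269
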
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/rope.py b/parrot/lib/python3.10/site-packages/mistral_inference/rope.py
new file mode 100644
index 0000000000000000000000000000000000000000..29749ff83669165698c6d08d201794d786e777cf
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/rope.py
@@ -0,0 +1,51 @@
+from typing import Tuple
+
+import torch
+
+
+def precompute_freqs_cis(dim: int, end: int, theta: float) -> torch.Tensor:
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+ t = torch.arange(end, device=freqs.device)
+ freqs = torch.outer(t, freqs).float()
+ return torch.polar(torch.ones_like(freqs), freqs) # complex64
+
+
+def apply_rotary_emb(
+ xq: torch.Tensor,
+ xk: torch.Tensor,
+ freqs_cis: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+ xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
+ freqs_cis = freqs_cis[:, None, :]
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(-2)
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(-2)
+ return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+def precompute_freqs_cis_2d(
+ dim: int,
+ height: int,
+ width: int,
+ theta: float,
+) -> torch.Tensor:
+ """
+ freqs_cis: 2D complex tensor of shape (height, width, dim // 2) to be indexed by
+ (height, width) position tuples
+ """
+ # (dim / 2) frequency bases
+ freqs = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
+
+ h = torch.arange(height, device=freqs.device)
+ w = torch.arange(width, device=freqs.device)
+
+ freqs_h = torch.outer(h, freqs[::2]).float()
+ freqs_w = torch.outer(w, freqs[1::2]).float()
+ freqs_2d = torch.cat(
+ [
+ freqs_h[:, None, :].repeat(1, width, 1),
+ freqs_w[None, :, :].repeat(height, 1, 1),
+ ],
+ dim=-1,
+ )
+ return torch.polar(torch.ones_like(freqs_2d), freqs_2d)
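
apply_rotary_emb multiplies query/key pairs by unit-modulus complex phases, so it rotates them without changing their norms. A small sketch with toy shapes (note that Transformer.forward_partial, later in this diff, indexes freqs_cis by position before calling this):

    import torch

    from mistral_inference.rope import apply_rotary_emb, precompute_freqs_cis

    freqs_cis = precompute_freqs_cis(dim=8, end=16, theta=10000.0)  # (16, 4) complex64
    xq = torch.randn(16, 1, 8)  # (num_tokens, n_heads, head_dim)
    xk = torch.randn(16, 1, 8)
    xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis)
    # |e^{i*theta}| == 1, so the rotation preserves per-head norms
    assert torch.allclose(xq_rot.norm(dim=-1), xq.norm(dim=-1), atol=1e-4)
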
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/transformer.py b/parrot/lib/python3.10/site-packages/mistral_inference/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c9aebec3ad86ff3c50945652f218bd6c5229cdd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/transformer.py
@@ -0,0 +1,292 @@
+import json
+import logging
+import math
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, List, Mapping, Optional, Union
+
+import safetensors.torch
+import torch
+from torch import nn
+
+from mistral_inference.args import TransformerArgs
+from mistral_inference.cache import BufferCache, CacheInputMetadata
+from mistral_inference.lora import LoRALoaderMixin
+from mistral_inference.model import ModelBase
+from mistral_inference.rope import precompute_freqs_cis
+from mistral_inference.transformer_layers import RMSNorm, TransformerBlock
+from mistral_inference.vision_encoder import VisionLanguageAdapter, VisionTransformer
+
+
+@dataclass
+class SimpleInputMetadata:
+ # rope absolute positions
+ positions: torch.Tensor
+
+ @staticmethod
+ def from_seqlens(seqlens: List[int], device: torch.device) -> "SimpleInputMetadata":
+ return SimpleInputMetadata(
+ positions=torch.cat([torch.arange(0, seqlen) for seqlen in seqlens]).to(device=device, dtype=torch.long)
+ )
+
+
+class Transformer(ModelBase, LoRALoaderMixin):
+ def __init__(
+ self,
+ args: TransformerArgs,
+ pipeline_rank: int = 0,
+ num_pipeline_ranks: int = 1,
+ ):
+ super().__init__()
+ self.args = args
+ self.vocab_size = args.vocab_size
+ self.n_layers = args.n_layers
+ self._precomputed_freqs_cis: Optional[torch.Tensor] = None
+ assert self.vocab_size > 0
+ assert pipeline_rank < num_pipeline_ranks, (pipeline_rank, num_pipeline_ranks)
+ self.pipeline_rank = pipeline_rank
+ self.num_pipeline_ranks = num_pipeline_ranks
+ # Modules specific to some ranks:
+ self.tok_embeddings: Optional[nn.Embedding] = None
+ self.norm: Optional[RMSNorm] = None
+ self.output: Optional[nn.Linear] = None
+ if pipeline_rank == 0:
+ self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
+
+ self.vision_encoder: Optional[VisionTransformer] = None
+ self.vision_language_adapter: Optional[VisionLanguageAdapter] = None
+ if args.vision_encoder is not None:
+ self.vision_encoder = VisionTransformer(args.vision_encoder)
+ self.vision_language_adapter = VisionLanguageAdapter(args.vision_encoder.hidden_size, args.dim)
+ if pipeline_rank == num_pipeline_ranks - 1:
+ self.norm = RMSNorm(args.dim, eps=args.norm_eps)
+ self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
+ # Initialize all layers but slice off those not of this rank.
+ layers = [
+ TransformerBlock(
+ dim=args.dim,
+ hidden_dim=args.hidden_dim,
+ n_heads=args.n_heads,
+ n_kv_heads=args.n_kv_heads,
+ head_dim=args.head_dim,
+ norm_eps=args.norm_eps,
+ lora=args.lora,
+ moe=args.moe,
+ )
+ for _ in range(args.n_layers)
+ ]
+ num_layers_per_rank = math.ceil(self.n_layers / self.num_pipeline_ranks)
+ offset = self.pipeline_rank * num_layers_per_rank
+ end = min(self.n_layers, offset + num_layers_per_rank)
+ self.layers = nn.ModuleDict({str(i): layers[i] for i in range(offset, end)})
+ self.n_local_layers = len(self.layers)
+
+ @property
+ def dtype(self) -> torch.dtype:
+ return next(self.parameters()).dtype
+
+ @property
+ def device(self) -> torch.device:
+ return next(self.parameters()).device
+
+ @property
+ def freqs_cis(self) -> torch.Tensor:
+ # We cache freqs_cis but need to take care that it is on the right device
+ # and has the right dtype (complex64). The fact that the dtype is different
+ # from the module's dtype means we cannot register it as a buffer
+ if self._precomputed_freqs_cis is None:
+ # default to 10**6
+ theta = self.args.rope_theta or 1000000.0
+ self._precomputed_freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000, theta)
+
+ if self._precomputed_freqs_cis.device != self.device:
+ self._precomputed_freqs_cis = self._precomputed_freqs_cis.to(device=self.device)
+ return self._precomputed_freqs_cis
+
+    def embed_vision_language_features(self, input_ids: torch.Tensor, images: List[torch.Tensor]) -> torch.Tensor:
+ assert self.tok_embeddings is not None
+ assert self.vision_encoder is not None
+ assert self.vision_language_adapter is not None
+ assert self.args.vision_encoder is not None
+
+ text_locations = input_ids != self.args.vision_encoder.image_token_id
+ image_locations = input_ids == self.args.vision_encoder.image_token_id
+ text_features = self.tok_embeddings(input_ids[text_locations])
+ image_features = self.vision_language_adapter(self.vision_encoder(images))
+
+ seq_len = input_ids.shape[0]
+ N_txt, D_txt = text_features.shape
+ N_img, D_img = image_features.shape
+
+ assert D_txt == D_img, f"Text features dim {D_txt} should be equal to image features dim {D_img}"
+ assert (
+ seq_len == N_txt + N_img
+ ), f"seq_len {seq_len} should be equal to N_txt + N_img {(N_txt, N_img, image_locations.sum().item())}"
+
+ combined_features = torch.empty(
+ (seq_len, D_txt),
+ dtype=text_features.dtype,
+ device=text_features.device,
+ )
+ combined_features[text_locations, :] = text_features
+ combined_features[image_locations, :] = image_features
+ return combined_features
+
+ def forward_partial(
+ self,
+ input_ids: torch.Tensor,
+ seqlens: List[int],
+ cache: Optional[BufferCache] = None,
+ images: Optional[List[torch.Tensor]] = None,
+ ) -> torch.Tensor:
+ """Local forward pass.
+
+ If doing pipeline parallelism, this will return the activations of the last layer of this stage.
+ For the last stage, this will return the normalized final embeddings.
+ """
+ assert (
+ len(seqlens) <= self.args.max_batch_size
+ ), f"Max batch size is {self.args.max_batch_size}, got batch size of {len(seqlens)}"
+ (num_toks,) = input_ids.shape
+ assert sum(seqlens) == num_toks, (sum(seqlens), num_toks)
+
+ input_metadata: Union[CacheInputMetadata, SimpleInputMetadata]
+
+ if cache is not None:
+ input_metadata = cache.get_input_metadata(seqlens)
+ else:
+ input_metadata = SimpleInputMetadata.from_seqlens(seqlens, self.device)
+
+ if self.pipeline_rank == 0:
+ assert self.tok_embeddings is not None
+ if self.vision_encoder is not None and images:
+ h = self.embed_vision_language_features(input_ids, images)
+ else:
+ h = self.tok_embeddings(input_ids)
+ else:
+ h = torch.empty(num_toks, self.args.dim, device=self.device, dtype=self.dtype)
+ torch.distributed.recv(h, src=self.pipeline_rank - 1)
+
+ freqs_cis = self.freqs_cis[input_metadata.positions]
+
+ for local_layer_id, layer in enumerate(self.layers.values()):
+ if cache is not None:
+ assert input_metadata is not None
+ assert isinstance(input_metadata, CacheInputMetadata)
+ cache_view = cache.get_view(local_layer_id, input_metadata)
+ else:
+ cache_view = None
+ h = layer(h, freqs_cis, cache_view)
+
+ if cache is not None:
+ cache.update_seqlens(seqlens)
+ if self.pipeline_rank < self.num_pipeline_ranks - 1:
+ torch.distributed.send(h, dst=self.pipeline_rank + 1)
+ return h
+ else:
+ # Last rank has a final normalization step.
+ assert self.norm is not None
+ return self.norm(h) # type: ignore
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ seqlens: List[int],
+ cache: Optional[BufferCache] = None,
+ images: Optional[List[torch.Tensor]] = None,
+ ) -> torch.Tensor:
+ h = self.forward_partial(input_ids, seqlens, cache=cache, images=images)
+ if self.pipeline_rank < self.num_pipeline_ranks - 1:
+ # ignore the intermediate activations as we'll get the final output from
+ # the last stage
+ outs = torch.empty(h.shape[0], self.vocab_size, device=h.device, dtype=h.dtype)
+ else:
+ assert self.output is not None
+ outs = self.output(h)
+ if self.num_pipeline_ranks > 1:
+ torch.distributed.broadcast(outs, src=self.num_pipeline_ranks - 1)
+ return outs.float()
+
+ def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False) -> None:
+ state_to_load = {}
+ skipped = set([])
+ for k, v in state_dict.items():
+ if k.startswith("tok_embeddings"):
+ if self.pipeline_rank == 0:
+ state_to_load[k] = v
+ else:
+ logging.debug(
+ "Skipping parameter %s at pipeline rank %d",
+ k,
+ self.pipeline_rank,
+ )
+ skipped.add(k)
+ elif k.startswith("norm") or k.startswith("output"):
+ if self.pipeline_rank == self.num_pipeline_ranks - 1:
+ state_to_load[k] = v
+ else:
+ logging.debug(
+ "Skipping parameter %s at pipeline rank %d",
+ k,
+ self.pipeline_rank,
+ )
+ skipped.add(k)
+ elif k.startswith("layers"):
+ layer_id = k.split(".")[1]
+ if layer_id in self.layers:
+ state_to_load[k] = v
+ else:
+ logging.debug(
+ "Skipping parameter %s at pipeline rank %d",
+ k,
+ self.pipeline_rank,
+ )
+ skipped.add(k)
+ elif k.startswith("vision_encoder") or k.startswith("vision_language_adapter"):
+ assert not self.pipeline_rank
+ state_to_load[k] = v
+ else:
+ raise ValueError(f"Unexpected key {k}")
+ assert set(state_dict.keys()) == skipped.union(set(state_to_load.keys()))
+ super().load_state_dict(state_to_load, strict=strict, assign=assign)
+
+ @staticmethod
+ def from_folder(
+ folder: Union[Path, str],
+ max_batch_size: int = 1,
+ num_pipeline_ranks: int = 1,
+ device: Union[torch.device, str] = "cuda",
+ dtype: Optional[torch.dtype] = None,
+ ) -> "Transformer":
+ with open(Path(folder) / "params.json", "r") as f:
+ model_args = TransformerArgs.from_dict(json.load(f))
+ model_args.max_batch_size = max_batch_size
+ if num_pipeline_ranks > 1:
+ pipeline_rank = torch.distributed.get_rank()
+ else:
+ pipeline_rank = 0
+ with torch.device("meta"):
+ model = Transformer(
+ model_args,
+ pipeline_rank=pipeline_rank,
+ num_pipeline_ranks=num_pipeline_ranks,
+ )
+
+ pt_model_file = Path(folder) / "consolidated.00.pth"
+ safetensors_model_file = Path(folder) / "consolidated.safetensors"
+
+ assert (
+ pt_model_file.exists() or safetensors_model_file.exists()
+ ), f"Make sure either {pt_model_file} or {safetensors_model_file} exists"
+ assert not (
+ pt_model_file.exists() and safetensors_model_file.exists()
+ ), f"Both {pt_model_file} and {safetensors_model_file} cannot exist"
+
+ if pt_model_file.exists():
+ loaded = torch.load(str(pt_model_file), mmap=True)
+ else:
+ loaded = safetensors.torch.load_file(str(safetensors_model_file))
+
+ model.load_state_dict(loaded, assign=True, strict=True)
+
+ return model.to(device=device, dtype=dtype)
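+
+# Example usage (a sketch; the folder path and token ids below are
+# placeholders, not values from any released checkpoint):
+#
+# model = Transformer.from_folder("/path/to/model", dtype=torch.bfloat16)
+# ids = torch.tensor([1, 2, 3, 4], device=model.device)
+# logits = model(ids, seqlens=[4]) # -> (4, vocab_size) float32 logits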
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/transformer_layers.py b/parrot/lib/python3.10/site-packages/mistral_inference/transformer_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee23f5669f5a2566703bb190344981cb5745aca
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/transformer_layers.py
@@ -0,0 +1,169 @@
+from functools import partial
+from typing import Optional, Tuple, Type, Union
+
+import torch
+from torch import nn
+from xformers.ops.fmha import memory_efficient_attention # type: ignore
+from xformers.ops.fmha.attn_bias import BlockDiagonalMask
+
+from mistral_inference.args import LoraArgs
+from mistral_inference.cache import CacheView
+from mistral_inference.lora import LoRALinear
+from mistral_inference.moe import MoeArgs, MoeLayer
+from mistral_inference.rope import apply_rotary_emb
+
+
+def repeat_kv(keys: torch.Tensor, values: torch.Tensor, repeats: int, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:
+ keys = torch.repeat_interleave(keys, repeats=repeats, dim=dim)
+ values = torch.repeat_interleave(values, repeats=repeats, dim=dim)
+ return keys, values
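+# Illustrative example: with n_heads=32 and n_kv_heads=8 (so repeats=4),
+# keys/values of shape (S, 8, head_dim) become (S, 32, head_dim), and each
+# group of four query heads shares one original KV head.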
+
+
+def maybe_lora(
+ lora_args: Optional[LoraArgs],
+) -> Union[Type[nn.Linear], partial[LoRALinear]]:
+ if lora_args is None:
+ return nn.Linear
+ else:
+ return partial(LoRALinear, rank=lora_args.rank, scaling=lora_args.scaling)
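+# Sketch of both branches (rank/scaling values here are hypothetical):
+# maybe_lora(None)(4096, 14336, bias=False) builds a plain nn.Linear, while
+# maybe_lora(LoraArgs(rank=16, scaling=2.0))(4096, 14336, bias=False) builds
+# a LoRALinear with those hyperparameters bound via functools.partial.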
+
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ n_heads: int,
+ head_dim: int,
+ n_kv_heads: int,
+ lora: Optional[LoraArgs] = None,
+ ):
+ super().__init__()
+
+ self.n_heads: int = n_heads
+ self.head_dim: int = head_dim
+ self.n_kv_heads: int = n_kv_heads
+
+ self.repeats = self.n_heads // self.n_kv_heads
+
+ self.scale = self.head_dim**-0.5
+
+ MaybeLora = maybe_lora(lora)
+ self.wq = MaybeLora(dim, n_heads * head_dim, bias=False)
+ self.wk = MaybeLora(dim, n_kv_heads * head_dim, bias=False)
+ self.wv = MaybeLora(dim, n_kv_heads * head_dim, bias=False)
+ self.wo = MaybeLora(n_heads * head_dim, dim, bias=False)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ freqs_cis: torch.Tensor,
+ cache: Optional[CacheView] = None,
+ mask: Optional[BlockDiagonalMask] = None,
+ ) -> torch.Tensor:
+ assert mask is None or cache is None
+ seqlen_sum, _ = x.shape
+
+ xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
+ xq = xq.view(seqlen_sum, self.n_heads, self.head_dim)
+ xk = xk.view(seqlen_sum, self.n_kv_heads, self.head_dim)
+ xv = xv.view(seqlen_sum, self.n_kv_heads, self.head_dim)
+ xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
+
+ if cache is None:
+ key, val = xk, xv
+ elif cache.prefill:
+ key, val = cache.interleave_kv(xk, xv)
+ cache.update(xk, xv)
+ else:
+ cache.update(xk, xv)
+ key, val = cache.key, cache.value
+ key = key.view(seqlen_sum * cache.max_seq_len, self.n_kv_heads, self.head_dim)
+ val = val.view(seqlen_sum * cache.max_seq_len, self.n_kv_heads, self.head_dim)
+
+ # Repeat keys and values to match number of query heads
+ key, val = repeat_kv(key, val, self.repeats, dim=1)
+
+ # xformers requires (B=1, S, H, D)
+ xq, key, val = xq[None, ...], key[None, ...], val[None, ...]
+ output = memory_efficient_attention(xq, key, val, mask if cache is None else cache.mask)
+ output = output.view(seqlen_sum, self.n_heads * self.head_dim)
+
+ assert isinstance(output, torch.Tensor)
+
+ return self.wo(output) # type: ignore
+
+
+class FeedForward(nn.Module):
+ def __init__(self, dim: int, hidden_dim: int, lora: Optional[LoraArgs] = None):
+ super().__init__()
+
+ MaybeLora = maybe_lora(lora)
+ self.w1 = MaybeLora(dim, hidden_dim, bias=False)
+ self.w2 = MaybeLora(hidden_dim, dim, bias=False)
+ self.w3 = MaybeLora(dim, hidden_dim, bias=False)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x)) # type: ignore
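+# SwiGLU feed-forward: w2(silu(w1(x)) * w3(x)), i.e. a gated MLP in which
+# w1 produces the gate, w3 the value, and w2 projects back to model dim.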
+
+
+class RMSNorm(torch.nn.Module):
+ def __init__(self, dim: int, eps: float = 1e-6):
+ super().__init__()
+ self.eps = eps
+ self.weight = nn.Parameter(torch.ones(dim))
+
+ def _norm(self, x: torch.Tensor) -> torch.Tensor:
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ output = self._norm(x.float()).type_as(x)
+ return output * self.weight
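+# RMSNorm: y = weight * x / sqrt(mean(x**2 over the last dim) + eps); unlike
+# LayerNorm it only rescales by the root mean square, with no mean
+# subtraction and no bias term.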
+
+
+class TransformerBlock(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ hidden_dim: int,
+ n_heads: int,
+ n_kv_heads: int,
+ head_dim: int,
+ norm_eps: float,
+ lora: Optional[LoraArgs] = None,
+ moe: Optional[MoeArgs] = None,
+ ):
+ super().__init__()
+ self.n_heads = n_heads
+ self.dim = dim
+ self.attention = Attention(
+ dim=dim,
+ n_heads=n_heads,
+ head_dim=head_dim,
+ n_kv_heads=n_kv_heads,
+ lora=lora,
+ )
+ self.attention_norm = RMSNorm(dim, eps=norm_eps)
+ self.ffn_norm = RMSNorm(dim, eps=norm_eps)
+
+ self.feed_forward: nn.Module
+ if moe is not None:
+ self.feed_forward = MoeLayer(
+ experts=[FeedForward(dim=dim, hidden_dim=hidden_dim, lora=lora) for _ in range(moe.num_experts)],
+ gate=nn.Linear(dim, moe.num_experts, bias=False),
+ moe_args=moe,
+ )
+ else:
+ self.feed_forward = FeedForward(dim=dim, hidden_dim=hidden_dim, lora=lora)
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ freqs_cis: torch.Tensor,
+ cache: Optional[CacheView] = None,
+ mask: Optional[BlockDiagonalMask] = None,
+ ) -> torch.Tensor:
+ r = self.attention.forward(self.attention_norm(x), freqs_cis, cache, mask=mask)
+ h = x + r
+ r = self.feed_forward.forward(self.ffn_norm(h))
+ out = h + r
+ return out
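+
+# Smoke-test sketch (hyperparameters are illustrative, not from a released
+# config; running it needs an xformers-compatible GPU setup):
+#
+# block = TransformerBlock(dim=512, hidden_dim=2048, n_heads=8,
+# n_kv_heads=2, head_dim=64, norm_eps=1e-5)
+# x = torch.randn(16, 512) # 16 tokens in one flattened sequence
+# freqs = torch.ones(16, 32, dtype=torch.complex64) # identity RoPE table
+# y = block(x, freqs) # -> (16, 512)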
diff --git a/parrot/lib/python3.10/site-packages/mistral_inference/vision_encoder.py b/parrot/lib/python3.10/site-packages/mistral_inference/vision_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..833cbb68f9ecd6800cab0c0d5c206c0067c6cee0
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mistral_inference/vision_encoder.py
@@ -0,0 +1,146 @@
+from typing import List, Optional
+
+import torch
+import torch.nn as nn
+from xformers.ops.fmha.attn_bias import BlockDiagonalMask
+
+from mistral_inference.args import VisionEncoderArgs
+from mistral_inference.rope import precompute_freqs_cis_2d
+from mistral_inference.transformer_layers import RMSNorm, TransformerBlock
+
+
+def position_meshgrid(
+ patch_embeds_list: List[torch.Tensor],
+) -> torch.Tensor:
+ positions = torch.cat(
+ [
+ torch.stack(
+ torch.meshgrid(
+ torch.arange(p.shape[-2]),
+ torch.arange(p.shape[-1]),
+ indexing="ij",
+ ),
+ dim=-1,
+ ).reshape(-1, 2)
+ for p in patch_embeds_list
+ ]
+ )
+ return positions
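+# Illustrative example: for two patch grids of sizes 2x2 and 1x3 this returns
+# [[0,0],[0,1],[1,0],[1,1],[0,0],[0,1],[0,2]]: per-image (row, col)
+# coordinates concatenated along the token axis.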
+
+
+class VisionTransformer(nn.Module):
+ def __init__(self, args: VisionEncoderArgs):
+ super().__init__()
+ self.args = args
+ self.patch_conv = nn.Conv2d(
+ in_channels=args.num_channels,
+ out_channels=args.hidden_size,
+ kernel_size=args.patch_size,
+ stride=args.patch_size,
+ bias=False,
+ )
+ self.ln_pre = RMSNorm(args.hidden_size, eps=1e-5)
+ self.transformer = VisionTransformerBlocks(args)
+
+ head_dim = self.args.hidden_size // self.args.num_attention_heads
+ assert head_dim % 2 == 0, "ROPE requires even head_dim"
+ self._freqs_cis: Optional[torch.Tensor] = None
+
+ @property
+ def max_patches_per_side(self) -> int:
+ return self.args.image_size // self.args.patch_size
+
+ @property
+ def device(self) -> torch.device:
+ return next(self.parameters()).device
+
+ @property
+ def freqs_cis(self) -> torch.Tensor:
+ if self._freqs_cis is None:
+ self._freqs_cis = precompute_freqs_cis_2d(
+ dim=self.args.hidden_size // self.args.num_attention_heads,
+ height=self.max_patches_per_side,
+ width=self.max_patches_per_side,
+ theta=self.args.rope_theta,
+ )
+
+ if self._freqs_cis.device != self.device:
+ self._freqs_cis = self._freqs_cis.to(device=self.device)
+
+ return self._freqs_cis
+
+ def forward(
+ self,
+ images: List[torch.Tensor],
+ ) -> torch.Tensor:
+ """
+ Args:
+ images: list of N_img images of variable sizes, each of shape (C, H, W)
+
+ Returns:
+ image_features: tensor of token features for all tokens of all images of
+ shape (N_toks, D)
+ """
+ # pass images through initial convolution independently
+ patch_embeds_list = [self.patch_conv(img.unsqueeze(0)).squeeze(0) for img in images]
+
+ # flatten to a single sequence
+ patch_embeds = torch.cat([p.flatten(1).permute(1, 0) for p in patch_embeds_list], dim=0)
+ patch_embeds = self.ln_pre(patch_embeds)
+
+ # positional embeddings
+ positions = position_meshgrid(patch_embeds_list).to(self.device)
+ freqs_cis = self.freqs_cis[positions[:, 0], positions[:, 1]]
+
+ # pass through Transformer with a block diagonal mask delimiting images
+ mask = BlockDiagonalMask.from_seqlens(
+ [p.shape[-2] * p.shape[-1] for p in patch_embeds_list],
+ )
+ out = self.transformer(patch_embeds, mask=mask, freqs_cis=freqs_cis)
+
+ # (N_toks, hidden_size) features for the flattened multi-image sequence
+ return out # type: ignore[no-any-return]
+
+
+class VisionLanguageAdapter(nn.Module):
+ def __init__(self, in_dim: int, out_dim: int):
+ super().__init__()
+ self.w_in = nn.Linear(
+ in_dim,
+ out_dim,
+ bias=True,
+ )
+ self.gelu = nn.GELU()
+ self.w_out = nn.Linear(out_dim, out_dim, bias=True)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ return self.w_out(self.gelu(self.w_in(x))) # type: ignore[no-any-return]
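+# A two-layer GELU MLP that projects vision tokens from the encoder width
+# (in_dim) into the language model's embedding space (out_dim).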
+
+
+class VisionTransformerBlocks(nn.Module):
+ def __init__(self, args: VisionEncoderArgs):
+ super().__init__()
+ self.layers = torch.nn.ModuleList()
+ for _ in range(args.num_hidden_layers):
+ self.layers.append(
+ TransformerBlock(
+ dim=args.hidden_size,
+ hidden_dim=args.intermediate_size,
+ n_heads=args.num_attention_heads,
+ n_kv_heads=args.num_attention_heads,
+ head_dim=args.hidden_size // args.num_attention_heads,
+ norm_eps=1e-5,
+ )
+ )
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ mask: BlockDiagonalMask,
+ freqs_cis: Optional[torch.Tensor],
+ ) -> torch.Tensor:
+ for layer in self.layers:
+ x = layer(x, mask=mask, freqs_cis=freqs_cis)
+ return x
+
+
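+# Usage sketch (argument values are hypothetical, not a released config):
+#
+# args = VisionEncoderArgs(hidden_size=1024, num_channels=3,
+# image_size=1024, patch_size=16, intermediate_size=4096,
+# num_hidden_layers=24, num_attention_heads=16, rope_theta=10000.0)
+# encoder = VisionTransformer(args)
+# imgs = [torch.randn(3, 256, 320), torch.randn(3, 128, 128)]
+# feats = encoder(imgs) # -> (sum of per-image patch counts, hidden_size)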
diff --git a/parrot/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 b/parrot/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..ff06a58be7b9ff80cee9b8eb45d5e9a28cf67d1b
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 differ
diff --git a/parrot/lib/python3.10/site-packages/pillow.libs/libbrotlidec-ba690955.so.1 b/parrot/lib/python3.10/site-packages/pillow.libs/libbrotlidec-ba690955.so.1
new file mode 100644
index 0000000000000000000000000000000000000000..b59afaea94084ff905a85348024ff00eecc7bb1f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pillow.libs/libbrotlidec-ba690955.so.1 differ
diff --git a/parrot/lib/python3.10/site-packages/pillow.libs/libsharpyuv-898c0cb5.so.0.1.0 b/parrot/lib/python3.10/site-packages/pillow.libs/libsharpyuv-898c0cb5.so.0.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..d384f32efa5b631d5c0635cc2e2562ef53a040e7
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pillow.libs/libsharpyuv-898c0cb5.so.0.1.0 differ
diff --git a/parrot/lib/python3.10/site-packages/pillow.libs/libwebpdemux-f2642bcc.so.2.0.15 b/parrot/lib/python3.10/site-packages/pillow.libs/libwebpdemux-f2642bcc.so.2.0.15
new file mode 100644
index 0000000000000000000000000000000000000000..cf0f8f4584bc85fa4bc52792adffc0721fee85ec
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pillow.libs/libwebpdemux-f2642bcc.so.2.0.15 differ
diff --git a/parrot/lib/python3.10/site-packages/pillow.libs/libwebpmux-d524b4d5.so.3.1.0 b/parrot/lib/python3.10/site-packages/pillow.libs/libwebpmux-d524b4d5.so.3.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..2c9c58f53e72b061479681be1d445e4a4ba80557
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pillow.libs/libwebpmux-d524b4d5.so.3.1.0 differ
diff --git a/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_imports.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_imports.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d211a00d52a63c1c9375c3db25f04a022d5d67f4
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_imports.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_leak.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_leak.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9778e0c46b75e1efdec35f065509d0c56c57feb5
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scripts/__pycache__/test_leak.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/scripts/run_emscripten_tests.py b/parrot/lib/python3.10/site-packages/scripts/run_emscripten_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a4b4a4e0561446c3d0e3db3fa68b02423f27342
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scripts/run_emscripten_tests.py
@@ -0,0 +1,343 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+import argparse
+import contextlib
+import http.server
+import os
+import queue
+import shutil
+import subprocess
+import sys
+import time
+import threading
+
+from pathlib import Path
+from io import BytesIO
+
+from selenium import webdriver
+
+
+class TemplateOverrider(http.server.SimpleHTTPRequestHandler):
+ def log_request(self, code="-", size="-"):
+ # suppress request logging to keep test output clean
+ return
+
+ def do_GET(self) -> bytes | None:
+ if self.path.endswith(PYARROW_WHEEL_PATH.name):
+ self.send_response(200)
+ self.send_header("Content-type", "application/x-zip")
+ self.end_headers()
+ with PYARROW_WHEEL_PATH.open(mode="rb") as wheel:
+ self.copyfile(wheel, self.wfile)
+ if self.path.endswith("/test.html"):
+ body = b"""
+
+
+
+
+
+
+
+ """
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.send_header("Content-length", len(body))
+ self.end_headers()
+ self.copyfile(BytesIO(body), self.wfile)
+ elif self.path.endswith("/worker.js"):
+ body = b"""
+ importScripts("./pyodide.js");
+ onmessage = async function (e) {
+ const data = e.data;
+ if (!self.pyodide) {
+ self.pyodide = await loadPyodide();
+ }
+ function do_print(arg) {
+ let databytes = Array.from(arg);
+ self.postMessage({print:databytes});
+ return databytes.length;
+ }
+ self.pyodide.setStdout({write:do_print,isatty:data.isatty});
+ self.pyodide.setStderr({write:do_print,isatty:data.isatty});
+
+ await self.pyodide.loadPackagesFromImports(data.python);
+ let results = await self.pyodide.runPythonAsync(data.python);
+ self.postMessage({results});
+ }
+ """
+ self.send_response(200)
+ self.send_header("Content-type", "application/javascript")
+ self.send_header("Content-length", len(body))
+ self.end_headers()
+ self.copyfile(BytesIO(body), self.wfile)
+
+ else:
+ return super().do_GET()
+
+ def end_headers(self):
+ # Enable Cross-Origin Resource Sharing (CORS)
+ self.send_header("Access-Control-Allow-Origin", "*")
+ super().end_headers()
+
+
+def run_server_thread(dist_dir, q):
+ global _SERVER_ADDRESS
+ os.chdir(dist_dir)
+ server = http.server.HTTPServer(("", 0), TemplateOverrider)
+ q.put(server.server_address)
+ print(f"Starting server for {dist_dir} at: {server.server_address}")
+ server.serve_forever()
+
+
+@contextlib.contextmanager
+def launch_server(dist_dir):
+ q = queue.Queue()
+ p = threading.Thread(target=run_server_thread, args=[dist_dir, q], daemon=True)
+ p.start()
+ address = q.get(timeout=50)
+ time.sleep(0.1) # wait to make sure server is started
+ yield address
+ # the server thread is a daemon, so it exits together with the process
+ # (threading.Thread has no terminate())
+
+
+class NodeDriver:
+
+ def __init__(self, hostname, port):
+ self.process = subprocess.Popen(
+ [shutil.which("script"), "-c", shutil.which("node")],
+ stdin=subprocess.PIPE,
+ shell=False,
+ bufsize=0,
+ )
+ print(self.process)
+ time.sleep(0.1) # wait for node to start
+ self.hostname = hostname
+ self.port = port
+ self.last_ret_code = None
+
+ def load_pyodide(self, dist_dir):
+ self.execute_js(
+ f"""
+ const {{ loadPyodide }} = require('{dist_dir}/pyodide.js');
+ let pyodide = await loadPyodide();
+ """
+ )
+
+ def clear_logs(self):
+ pass # we don't handle logs for node
+
+ def write_stdin(self, buffer):
+ # stdin is opened unbuffered (bufsize=0), so write() may accept only
+ # part of the buffer; keep writing until it is all sent or node exits
+ while len(buffer) > 0 and self.process.poll() is None:
+ written = self.process.stdin.write(buffer)
+ if written == len(buffer):
+ break
+ elif written == 0:
+ # full buffer - wait
+ time.sleep(0.01)
+ else:
+ buffer = buffer[written:]
+
+ def execute_js(self, code, wait_for_terminate=True):
+ self.write_stdin((code + "\n").encode("utf-8"))
+
+ def load_arrow(self):
+ self.execute_js(f"await pyodide.loadPackage('{PYARROW_WHEEL_PATH}');")
+
+ def execute_python(self, code, wait_for_terminate=True):
+ js_code = f"""
+ python = `{code}`;
+ await pyodide.loadPackagesFromImports(python);
+ python_output = await pyodide.runPythonAsync(python);
+ """
+ self.last_ret_code = self.execute_js(js_code, wait_for_terminate)
+ return self.last_ret_code
+
+ def wait_for_done(self):
+ # ask node to exit with the python result code,
+ # then wait for the process to finish
+ self.write_stdin(b"process.exit(python_output)\n")
+ return self.process.wait()
+
+
+class BrowserDriver:
+ def __init__(self, hostname, port, driver):
+ self.driver = driver
+ self.driver.get(f"http://{hostname}:{port}/test.html")
+ self.driver.set_script_timeout(100)
+
+ def load_pyodide(self, dist_dir):
+ pass
+
+ def load_arrow(self):
+ self.execute_python(
+ f"import pyodide_js as pjs\n"
+ f"await pjs.loadPackage('{PYARROW_WHEEL_PATH.name}')\n"
+ )
+
+ def execute_python(self, code, wait_for_terminate=True):
+ if wait_for_terminate:
+ self.driver.execute_async_script(
+ f"""
+ let callback = arguments[arguments.length-1];
+ python = `{code}`;
+ window.python_done_callback = callback;
+ window.pyworker.postMessage(
+ {{python, isatty: {'true' if sys.stdout.isatty() else 'false'}}});
+ """
+ )
+ else:
+ self.driver.execute_script(
+ f"""
+ let python = `{code}`;
+ window.python_done_callback= (x) => {{window.python_script_done=x;}};
+ window.pyworker.postMessage(
+ {{python,isatty:{'true' if sys.stdout.isatty() else 'false'}}});
+ """
+ )
+
+ def clear_logs(self):
+ self.driver.execute_script("window.python_logs = [];")
+
+ def wait_for_done(self):
+ while True:
+ # poll for console.log messages from our webworker
+ # which are the output of pytest
+ lines = self.driver.execute_script(
+ "let temp = window.python_logs;window.python_logs=[];return temp;"
+ )
+ if len(lines) > 0:
+ sys.stdout.buffer.write(bytes(lines))
+ done = self.driver.execute_script("return window.python_script_done;")
+ if done is not None:
+ value = done["result"]
+ self.driver.execute_script("delete window.python_script_done;")
+ return value
+ time.sleep(0.1)
+
+
+class ChromeDriver(BrowserDriver):
+ def __init__(self, hostname, port):
+ from selenium.webdriver.chrome.options import Options
+
+ options = Options()
+ options.add_argument("--headless")
+ options.add_argument("--no-sandbox")
+ super().__init__(hostname, port, webdriver.Chrome(options=options))
+
+
+class FirefoxDriver(BrowserDriver):
+ def __init__(self, hostname, port):
+ from selenium.webdriver.firefox.options import Options
+
+ options = Options()
+ options.add_argument("--headless")
+
+ super().__init__(hostname, port, webdriver.Firefox(options=options))
+
+
+def _load_pyarrow_in_runner(driver, wheel_name):
+ driver.load_arrow()
+ driver.execute_python(
+ """import sys
+import micropip
+if "pyarrow" not in sys.modules:
+ await micropip.install("hypothesis")
+ import pyodide_js as pjs
+ await pjs.loadPackage("numpy")
+ await pjs.loadPackage("pandas")
+ import pytest
+ import pandas # import pandas after pyarrow package load for pandas/pyarrow
+ # functions to work
+import pyarrow
+ """,
+ wait_for_terminate=True,
+ )
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "-d",
+ "--dist-dir",
+ type=str,
+ help="Pyodide distribution directory",
+ default="./pyodide",
+)
+parser.add_argument("wheel", type=str, help="Wheel to run tests from")
+parser.add_argument(
+ "-t", "--test-submodule", help="Submodule that tests live in", default="test"
+)
+parser.add_argument(
+ "-r",
+ "--runtime",
+ type=str,
+ choices=["chrome", "node", "firefox"],
+ help="Runtime to run tests in",
+ default="chrome",
+)
+args = parser.parse_args()
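+# Example invocation (paths and the wheel filename are placeholders):
+# python run_emscripten_tests.py --dist-dir ./pyodide --runtime chrome \
+# path/to/pyarrow-<version>-<pyodide tag>.whl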
+
+PYARROW_WHEEL_PATH = Path(args.wheel).resolve()
+
+dist_dir = Path(os.getcwd(), args.dist_dir).resolve()
+print(f"dist dir={dist_dir}")
+with launch_server(dist_dir) as (hostname, port):
+ if args.runtime == "chrome":
+ driver = ChromeDriver(hostname, port)
+ elif args.runtime == "node":
+ driver = NodeDriver(hostname, port)
+ elif args.runtime == "firefox":
+ driver = FirefoxDriver(hostname, port)
+
+ print("Load pyodide in browser")
+ driver.load_pyodide(dist_dir)
+ print("Load pyarrow in browser")
+ _load_pyarrow_in_runner(driver, Path(args.wheel).name)
+ driver.clear_logs()
+ print("Run pytest in browser")
+ driver.execute_python(
+ """
+import pyarrow,pathlib
+pyarrow_dir = pathlib.Path(pyarrow.__file__).parent
+pytest.main([pyarrow_dir, '-v'])
+""",
+ wait_for_terminate=False,
+ )
+ print("Wait for done")
+ os._exit(driver.wait_for_done())
diff --git a/parrot/lib/python3.10/site-packages/scripts/test_leak.py b/parrot/lib/python3.10/site-packages/scripts/test_leak.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a87f5e742e804ea1d18df90bb0c06dcea3dcb7
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/scripts/test_leak.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import pyarrow as pa
+import numpy as np
+import pandas as pd
+from pyarrow.tests.util import rands
+import memory_profiler
+import gc
+import io
+
+MEGABYTE = 1 << 20
+
+
+def assert_does_not_leak(f, iterations=10, check_interval=1, tolerance=5):
+ gc.collect()
+ baseline = memory_profiler.memory_usage()[0]
+ for i in range(iterations):
+ f()
+ if i % check_interval == 0:
+ gc.collect()
+ usage = memory_profiler.memory_usage()[0]
+ diff = usage - baseline
+ print("{0}: {1}\r".format(i, diff), end="")
+ if diff > tolerance:
+ raise Exception("Memory increased by {0} megabytes after {1} "
+ "iterations".format(diff, i + 1))
+ gc.collect()
+ usage = memory_profiler.memory_usage()[0]
+ diff = usage - baseline
+ print("\nMemory increased by {0} megabytes after {1} "
+ "iterations".format(diff, iterations))
+
+
+def test_leak1():
+ data = [pa.array(np.concatenate([np.random.randn(100000)] * 1000))]
+ table = pa.Table.from_arrays(data, ['foo'])
+
+ def func():
+ table.to_pandas()
+ assert_does_not_leak(func)
+
+
+def test_leak2():
+ data = [pa.array(np.concatenate([np.random.randn(100000)] * 10))]
+ table = pa.Table.from_arrays(data, ['foo'])
+
+ def func():
+ df = table.to_pandas()
+
+ batch = pa.RecordBatch.from_pandas(df)
+
+ sink = io.BytesIO()
+ writer = pa.RecordBatchFileWriter(sink, batch.schema)
+ writer.write_batch(batch)
+ writer.close()
+
+ buf_reader = pa.BufferReader(sink.getvalue())
+ reader = pa.open_file(buf_reader)
+ reader.read_all()
+
+ assert_does_not_leak(func, iterations=50, tolerance=50)
+
+
+def test_leak3():
+ import pyarrow.parquet as pq
+
+ df = pd.DataFrame({'a{0}'.format(i): [1, 2, 3, 4]
+ for i in range(50)})
+ table = pa.Table.from_pandas(df, preserve_index=False)
+
+ writer = pq.ParquetWriter('leak_test_' + rands(5) + '.parquet',
+ table.schema)
+
+ def func():
+ writer.write_table(table, row_group_size=len(table))
+
+ # This does not "leak" per se but we do want to have this use as little
+ # memory as possible
+ assert_does_not_leak(func, iterations=500,
+ check_interval=50, tolerance=20)
+
+
+def test_ARROW_8801():
+ x = pd.to_datetime(np.random.randint(0, 2**32, size=2**20, dtype=np.int64),
+ unit='ms', utc=True)
+ table = pa.table(pd.DataFrame({'x': x}))
+
+ assert_does_not_leak(lambda: table.to_pandas(split_blocks=False),
+ iterations=1000, check_interval=50, tolerance=1000)
+
+
+if __name__ == '__main__':
+ test_ARROW_8801()
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/PKG-INFO b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..0c941771c526fbff823bfa8501a27faab8f2e993
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/PKG-INFO
@@ -0,0 +1,142 @@
+Metadata-Version: 2.2
+Name: setuptools
+Version: 75.8.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Author-email: Python Packaging Authority
+Project-URL: Source, https://github.com/pypa/setuptools
+Project-URL: Documentation, https://setuptools.pypa.io/
+Project-URL: Changelog, https://setuptools.pypa.io/en/stable/history.html
+Keywords: CPAN PyPI distutils eggs package management
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Provides-Extra: test
+Requires-Dist: pytest!=8.1.*,>=6; extra == "test"
+Requires-Dist: virtualenv>=13.0.0; extra == "test"
+Requires-Dist: wheel>=0.44.0; extra == "test"
+Requires-Dist: pip>=19.1; extra == "test"
+Requires-Dist: packaging>=24.2; extra == "test"
+Requires-Dist: jaraco.envs>=2.2; extra == "test"
+Requires-Dist: pytest-xdist>=3; extra == "test"
+Requires-Dist: jaraco.path>=3.7.2; extra == "test"
+Requires-Dist: build[virtualenv]>=1.0.3; extra == "test"
+Requires-Dist: filelock>=3.4.0; extra == "test"
+Requires-Dist: ini2toml[lite]>=0.14; extra == "test"
+Requires-Dist: tomli-w>=1.0.0; extra == "test"
+Requires-Dist: pytest-timeout; extra == "test"
+Requires-Dist: pytest-perf; sys_platform != "cygwin" and extra == "test"
+Requires-Dist: jaraco.develop>=7.21; (python_version >= "3.9" and sys_platform != "cygwin") and extra == "test"
+Requires-Dist: pytest-home>=0.5; extra == "test"
+Requires-Dist: pytest-subprocess; extra == "test"
+Requires-Dist: pyproject-hooks!=1.1; extra == "test"
+Requires-Dist: jaraco.test>=5.5; extra == "test"
+Provides-Extra: doc
+Requires-Dist: sphinx>=3.5; extra == "doc"
+Requires-Dist: jaraco.packaging>=9.3; extra == "doc"
+Requires-Dist: rst.linker>=1.9; extra == "doc"
+Requires-Dist: furo; extra == "doc"
+Requires-Dist: sphinx-lint; extra == "doc"
+Requires-Dist: jaraco.tidelift>=1.4; extra == "doc"
+Requires-Dist: pygments-github-lexers==0.0.5; extra == "doc"
+Requires-Dist: sphinx-favicon; extra == "doc"
+Requires-Dist: sphinx-inline-tabs; extra == "doc"
+Requires-Dist: sphinx-reredirects; extra == "doc"
+Requires-Dist: sphinxcontrib-towncrier; extra == "doc"
+Requires-Dist: sphinx-notfound-page<2,>=1; extra == "doc"
+Requires-Dist: pyproject-hooks!=1.1; extra == "doc"
+Requires-Dist: towncrier<24.7; extra == "doc"
+Provides-Extra: ssl
+Provides-Extra: certs
+Provides-Extra: core
+Requires-Dist: packaging>=24.2; extra == "core"
+Requires-Dist: more_itertools>=8.8; extra == "core"
+Requires-Dist: jaraco.text>=3.7; extra == "core"
+Requires-Dist: importlib_metadata>=6; python_version < "3.10" and extra == "core"
+Requires-Dist: tomli>=2.0.1; python_version < "3.11" and extra == "core"
+Requires-Dist: wheel>=0.43.0; extra == "core"
+Requires-Dist: platformdirs>=4.2.2; extra == "core"
+Requires-Dist: jaraco.collections; extra == "core"
+Requires-Dist: jaraco.functools>=4; extra == "core"
+Requires-Dist: packaging; extra == "core"
+Requires-Dist: more_itertools; extra == "core"
+Provides-Extra: check
+Requires-Dist: pytest-checkdocs>=2.4; extra == "check"
+Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check"
+Requires-Dist: ruff>=0.8.0; sys_platform != "cygwin" and extra == "check"
+Provides-Extra: cover
+Requires-Dist: pytest-cov; extra == "cover"
+Provides-Extra: enabler
+Requires-Dist: pytest-enabler>=2.2; extra == "enabler"
+Provides-Extra: type
+Requires-Dist: pytest-mypy; extra == "type"
+Requires-Dist: mypy==1.14.*; extra == "type"
+Requires-Dist: importlib_metadata>=7.0.2; python_version < "3.10" and extra == "type"
+Requires-Dist: jaraco.develop>=7.21; sys_platform != "cygwin" and extra == "type"
+
+.. |pypi-version| image:: https://img.shields.io/pypi/v/setuptools.svg
+ :target: https://pypi.org/project/setuptools
+
+.. |py-version| image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+
+.. |test-badge| image:: https://github.com/pypa/setuptools/actions/workflows/main.yml/badge.svg
+ :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
+ :alt: tests
+
+.. |ruff-badge| image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
+ :target: https://github.com/astral-sh/ruff
+ :alt: Ruff
+
+.. |docs-badge| image:: https://img.shields.io/readthedocs/setuptools/latest.svg
+ :target: https://setuptools.pypa.io
+
+.. |skeleton-badge| image:: https://img.shields.io/badge/skeleton-2024-informational
+ :target: https://blog.jaraco.com/skeleton
+
+.. |codecov-badge| image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
+ :target: https://codecov.io/gh/pypa/setuptools
+
+.. |tidelift-badge| image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
+ :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
+
+.. |discord-badge| image:: https://img.shields.io/discord/803025117553754132
+ :target: https://discord.com/channels/803025117553754132/815945031150993468
+ :alt: Discord
+
+|pypi-version| |py-version| |test-badge| |ruff-badge| |docs-badge| |skeleton-badge| |codecov-badge| |discord-badge|
+
+See the `Quickstart <https://setuptools.pypa.io/en/latest/userguide/quickstart.html>`_
+and the `User's Guide <https://setuptools.pypa.io/en/latest/userguide/>`_ for
+instructions on how to use Setuptools.
+
+Questions and comments should be directed to `GitHub Discussions
+<https://github.com/pypa/setuptools/discussions>`_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+<https://github.com/pypa/setuptools/issues>`_.
+
+
+Code of Conduct
+===============
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and fora is expected to follow the
+`PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
+
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme>`_.
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/SOURCES.txt b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..29082151c0f8f791aadab0533d15c3e345d23835
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/SOURCES.txt
@@ -0,0 +1,571 @@
+LICENSE
+MANIFEST.in
+NEWS.rst
+README.rst
+conftest.py
+exercises.py
+launcher.c
+mypy.ini
+pyproject.toml
+pytest.ini
+setup.cfg
+setup.py
+tox.ini
+_distutils_hack/__init__.py
+_distutils_hack/override.py
+docs/artwork.rst
+docs/build_meta.rst
+docs/conf.py
+docs/history.rst
+docs/index.rst
+docs/pkg_resources.rst
+docs/python 2 sunset.rst
+docs/roadmap.rst
+docs/setuptools.rst
+docs/deprecated/changed_keywords.rst
+docs/deprecated/commands.rst
+docs/deprecated/dependency_links.rst
+docs/deprecated/distutils-legacy.rst
+docs/deprecated/easy_install.rst
+docs/deprecated/functionalities.rst
+docs/deprecated/index.rst
+docs/deprecated/python_eggs.rst
+docs/deprecated/resource_extraction.rst
+docs/deprecated/zip_safe.rst
+docs/deprecated/distutils/_setuptools_disclaimer.rst
+docs/deprecated/distutils/apiref.rst
+docs/deprecated/distutils/builtdist.rst
+docs/deprecated/distutils/commandref.rst
+docs/deprecated/distutils/configfile.rst
+docs/deprecated/distutils/examples.rst
+docs/deprecated/distutils/extending.rst
+docs/deprecated/distutils/index.rst
+docs/deprecated/distutils/introduction.rst
+docs/deprecated/distutils/packageindex.rst
+docs/deprecated/distutils/setupscript.rst
+docs/deprecated/distutils/sourcedist.rst
+docs/deprecated/distutils/uploading.rst
+docs/development/developer-guide.rst
+docs/development/index.rst
+docs/development/releases.rst
+docs/references/keywords.rst
+docs/userguide/datafiles.rst
+docs/userguide/declarative_config.rst
+docs/userguide/dependency_management.rst
+docs/userguide/development_mode.rst
+docs/userguide/distribution.rst
+docs/userguide/entry_point.rst
+docs/userguide/ext_modules.rst
+docs/userguide/extension.rst
+docs/userguide/index.rst
+docs/userguide/miscellaneous.rst
+docs/userguide/package_discovery.rst
+docs/userguide/pyproject_config.rst
+docs/userguide/quickstart.rst
+newsfragments/.gitignore
+newsfragments/README.rst
+pkg_resources/__init__.py
+pkg_resources/api_tests.txt
+pkg_resources/py.typed
+pkg_resources/tests/__init__.py
+pkg_resources/tests/test_find_distributions.py
+pkg_resources/tests/test_integration_zope_interface.py
+pkg_resources/tests/test_markers.py
+pkg_resources/tests/test_pkg_resources.py
+pkg_resources/tests/test_resources.py
+pkg_resources/tests/test_working_set.py
+pkg_resources/tests/data/my-test-package-source/setup.cfg
+pkg_resources/tests/data/my-test-package-source/setup.py
+pkg_resources/tests/data/my-test-package-zip/my-test-package.zip
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/PKG-INFO
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/SOURCES.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/dependency_links.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/top_level.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/zip-safe
+pkg_resources/tests/data/my-test-package_zipped-egg/my_test_package-1.0-py3.7.egg
+setuptools/__init__.py
+setuptools/_core_metadata.py
+setuptools/_entry_points.py
+setuptools/_imp.py
+setuptools/_importlib.py
+setuptools/_itertools.py
+setuptools/_normalization.py
+setuptools/_path.py
+setuptools/_reqs.py
+setuptools/_shutil.py
+setuptools/_static.py
+setuptools/archive_util.py
+setuptools/build_meta.py
+setuptools/cli-32.exe
+setuptools/cli-64.exe
+setuptools/cli-arm64.exe
+setuptools/cli.exe
+setuptools/depends.py
+setuptools/discovery.py
+setuptools/dist.py
+setuptools/errors.py
+setuptools/extension.py
+setuptools/glob.py
+setuptools/gui-32.exe
+setuptools/gui-64.exe
+setuptools/gui-arm64.exe
+setuptools/gui.exe
+setuptools/installer.py
+setuptools/launch.py
+setuptools/logging.py
+setuptools/modified.py
+setuptools/monkey.py
+setuptools/msvc.py
+setuptools/namespaces.py
+setuptools/package_index.py
+setuptools/sandbox.py
+setuptools/script (dev).tmpl
+setuptools/script.tmpl
+setuptools/unicode_utils.py
+setuptools/version.py
+setuptools/warnings.py
+setuptools/wheel.py
+setuptools/windows_support.py
+setuptools.egg-info/PKG-INFO
+setuptools.egg-info/SOURCES.txt
+setuptools.egg-info/dependency_links.txt
+setuptools.egg-info/entry_points.txt
+setuptools.egg-info/requires.txt
+setuptools.egg-info/top_level.txt
+setuptools/_distutils/__init__.py
+setuptools/_distutils/_log.py
+setuptools/_distutils/_macos_compat.py
+setuptools/_distutils/_modified.py
+setuptools/_distutils/_msvccompiler.py
+setuptools/_distutils/archive_util.py
+setuptools/_distutils/ccompiler.py
+setuptools/_distutils/cmd.py
+setuptools/_distutils/core.py
+setuptools/_distutils/cygwinccompiler.py
+setuptools/_distutils/debug.py
+setuptools/_distutils/dep_util.py
+setuptools/_distutils/dir_util.py
+setuptools/_distutils/dist.py
+setuptools/_distutils/errors.py
+setuptools/_distutils/extension.py
+setuptools/_distutils/fancy_getopt.py
+setuptools/_distutils/file_util.py
+setuptools/_distutils/filelist.py
+setuptools/_distutils/log.py
+setuptools/_distutils/spawn.py
+setuptools/_distutils/sysconfig.py
+setuptools/_distutils/text_file.py
+setuptools/_distutils/unixccompiler.py
+setuptools/_distutils/util.py
+setuptools/_distutils/version.py
+setuptools/_distutils/versionpredicate.py
+setuptools/_distutils/zosccompiler.py
+setuptools/_distutils/command/__init__.py
+setuptools/_distutils/command/_framework_compat.py
+setuptools/_distutils/command/bdist.py
+setuptools/_distutils/command/bdist_dumb.py
+setuptools/_distutils/command/bdist_rpm.py
+setuptools/_distutils/command/build.py
+setuptools/_distutils/command/build_clib.py
+setuptools/_distutils/command/build_ext.py
+setuptools/_distutils/command/build_py.py
+setuptools/_distutils/command/build_scripts.py
+setuptools/_distutils/command/check.py
+setuptools/_distutils/command/clean.py
+setuptools/_distutils/command/config.py
+setuptools/_distutils/command/install.py
+setuptools/_distutils/command/install_data.py
+setuptools/_distutils/command/install_egg_info.py
+setuptools/_distutils/command/install_headers.py
+setuptools/_distutils/command/install_lib.py
+setuptools/_distutils/command/install_scripts.py
+setuptools/_distutils/command/sdist.py
+setuptools/_distutils/compat/__init__.py
+setuptools/_distutils/compat/py39.py
+setuptools/_distutils/tests/__init__.py
+setuptools/_distutils/tests/support.py
+setuptools/_distutils/tests/test_archive_util.py
+setuptools/_distutils/tests/test_bdist.py
+setuptools/_distutils/tests/test_bdist_dumb.py
+setuptools/_distutils/tests/test_bdist_rpm.py
+setuptools/_distutils/tests/test_build.py
+setuptools/_distutils/tests/test_build_clib.py
+setuptools/_distutils/tests/test_build_ext.py
+setuptools/_distutils/tests/test_build_py.py
+setuptools/_distutils/tests/test_build_scripts.py
+setuptools/_distutils/tests/test_ccompiler.py
+setuptools/_distutils/tests/test_check.py
+setuptools/_distutils/tests/test_clean.py
+setuptools/_distutils/tests/test_cmd.py
+setuptools/_distutils/tests/test_config_cmd.py
+setuptools/_distutils/tests/test_core.py
+setuptools/_distutils/tests/test_cygwinccompiler.py
+setuptools/_distutils/tests/test_dir_util.py
+setuptools/_distutils/tests/test_dist.py
+setuptools/_distutils/tests/test_extension.py
+setuptools/_distutils/tests/test_file_util.py
+setuptools/_distutils/tests/test_filelist.py
+setuptools/_distutils/tests/test_install.py
+setuptools/_distutils/tests/test_install_data.py
+setuptools/_distutils/tests/test_install_headers.py
+setuptools/_distutils/tests/test_install_lib.py
+setuptools/_distutils/tests/test_install_scripts.py
+setuptools/_distutils/tests/test_log.py
+setuptools/_distutils/tests/test_mingwccompiler.py
+setuptools/_distutils/tests/test_modified.py
+setuptools/_distutils/tests/test_msvccompiler.py
+setuptools/_distutils/tests/test_sdist.py
+setuptools/_distutils/tests/test_spawn.py
+setuptools/_distutils/tests/test_sysconfig.py
+setuptools/_distutils/tests/test_text_file.py
+setuptools/_distutils/tests/test_unixccompiler.py
+setuptools/_distutils/tests/test_util.py
+setuptools/_distutils/tests/test_version.py
+setuptools/_distutils/tests/test_versionpredicate.py
+setuptools/_distutils/tests/unix_compat.py
+setuptools/_distutils/tests/compat/__init__.py
+setuptools/_distutils/tests/compat/py39.py
+setuptools/_vendor/ruff.toml
+setuptools/_vendor/typing_extensions.py
+setuptools/_vendor/autocommand/__init__.py
+setuptools/_vendor/autocommand/autoasync.py
+setuptools/_vendor/autocommand/autocommand.py
+setuptools/_vendor/autocommand/automain.py
+setuptools/_vendor/autocommand/autoparse.py
+setuptools/_vendor/autocommand/errors.py
+setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER
+setuptools/_vendor/autocommand-2.2.2.dist-info/LICENSE
+setuptools/_vendor/autocommand-2.2.2.dist-info/METADATA
+setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD
+setuptools/_vendor/autocommand-2.2.2.dist-info/WHEEL
+setuptools/_vendor/autocommand-2.2.2.dist-info/top_level.txt
+setuptools/_vendor/backports/__init__.py
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/LICENSE
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/METADATA
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/REQUESTED
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/WHEEL
+setuptools/_vendor/backports.tarfile-1.2.0.dist-info/top_level.txt
+setuptools/_vendor/backports/tarfile/__init__.py
+setuptools/_vendor/backports/tarfile/__main__.py
+setuptools/_vendor/backports/tarfile/compat/__init__.py
+setuptools/_vendor/backports/tarfile/compat/py38.py
+setuptools/_vendor/importlib_metadata/__init__.py
+setuptools/_vendor/importlib_metadata/_adapters.py
+setuptools/_vendor/importlib_metadata/_collections.py
+setuptools/_vendor/importlib_metadata/_compat.py
+setuptools/_vendor/importlib_metadata/_functools.py
+setuptools/_vendor/importlib_metadata/_itertools.py
+setuptools/_vendor/importlib_metadata/_meta.py
+setuptools/_vendor/importlib_metadata/_text.py
+setuptools/_vendor/importlib_metadata/diagnose.py
+setuptools/_vendor/importlib_metadata/py.typed
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/INSTALLER
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/LICENSE
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/METADATA
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/RECORD
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/REQUESTED
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/WHEEL
+setuptools/_vendor/importlib_metadata-8.0.0.dist-info/top_level.txt
+setuptools/_vendor/importlib_metadata/compat/__init__.py
+setuptools/_vendor/importlib_metadata/compat/py311.py
+setuptools/_vendor/importlib_metadata/compat/py39.py
+setuptools/_vendor/inflect/__init__.py
+setuptools/_vendor/inflect/py.typed
+setuptools/_vendor/inflect-7.3.1.dist-info/INSTALLER
+setuptools/_vendor/inflect-7.3.1.dist-info/LICENSE
+setuptools/_vendor/inflect-7.3.1.dist-info/METADATA
+setuptools/_vendor/inflect-7.3.1.dist-info/RECORD
+setuptools/_vendor/inflect-7.3.1.dist-info/WHEEL
+setuptools/_vendor/inflect-7.3.1.dist-info/top_level.txt
+setuptools/_vendor/inflect/compat/__init__.py
+setuptools/_vendor/inflect/compat/py38.py
+setuptools/_vendor/jaraco/context.py
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/INSTALLER
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/LICENSE
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/METADATA
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/RECORD
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/REQUESTED
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/WHEEL
+setuptools/_vendor/jaraco.collections-5.1.0.dist-info/top_level.txt
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/INSTALLER
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/LICENSE
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/METADATA
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/RECORD
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/WHEEL
+setuptools/_vendor/jaraco.context-5.3.0.dist-info/top_level.txt
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/INSTALLER
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/LICENSE
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/METADATA
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/RECORD
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/WHEEL
+setuptools/_vendor/jaraco.functools-4.0.1.dist-info/top_level.txt
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/INSTALLER
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/LICENSE
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/METADATA
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/RECORD
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/REQUESTED
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/WHEEL
+setuptools/_vendor/jaraco.text-3.12.1.dist-info/top_level.txt
+setuptools/_vendor/jaraco/collections/__init__.py
+setuptools/_vendor/jaraco/collections/py.typed
+setuptools/_vendor/jaraco/functools/__init__.py
+setuptools/_vendor/jaraco/functools/__init__.pyi
+setuptools/_vendor/jaraco/functools/py.typed
+setuptools/_vendor/jaraco/text/Lorem ipsum.txt
+setuptools/_vendor/jaraco/text/__init__.py
+setuptools/_vendor/jaraco/text/layouts.py
+setuptools/_vendor/jaraco/text/show-newlines.py
+setuptools/_vendor/jaraco/text/strip-prefix.py
+setuptools/_vendor/jaraco/text/to-dvorak.py
+setuptools/_vendor/jaraco/text/to-qwerty.py
+setuptools/_vendor/more_itertools/__init__.py
+setuptools/_vendor/more_itertools/__init__.pyi
+setuptools/_vendor/more_itertools/more.py
+setuptools/_vendor/more_itertools/more.pyi
+setuptools/_vendor/more_itertools/py.typed
+setuptools/_vendor/more_itertools/recipes.py
+setuptools/_vendor/more_itertools/recipes.pyi
+setuptools/_vendor/more_itertools-10.3.0.dist-info/INSTALLER
+setuptools/_vendor/more_itertools-10.3.0.dist-info/LICENSE
+setuptools/_vendor/more_itertools-10.3.0.dist-info/METADATA
+setuptools/_vendor/more_itertools-10.3.0.dist-info/RECORD
+setuptools/_vendor/more_itertools-10.3.0.dist-info/REQUESTED
+setuptools/_vendor/more_itertools-10.3.0.dist-info/WHEEL
+setuptools/_vendor/packaging/__init__.py
+setuptools/_vendor/packaging/_elffile.py
+setuptools/_vendor/packaging/_manylinux.py
+setuptools/_vendor/packaging/_musllinux.py
+setuptools/_vendor/packaging/_parser.py
+setuptools/_vendor/packaging/_structures.py
+setuptools/_vendor/packaging/_tokenizer.py
+setuptools/_vendor/packaging/markers.py
+setuptools/_vendor/packaging/metadata.py
+setuptools/_vendor/packaging/py.typed
+setuptools/_vendor/packaging/requirements.py
+setuptools/_vendor/packaging/specifiers.py
+setuptools/_vendor/packaging/tags.py
+setuptools/_vendor/packaging/utils.py
+setuptools/_vendor/packaging/version.py
+setuptools/_vendor/packaging-24.2.dist-info/INSTALLER
+setuptools/_vendor/packaging-24.2.dist-info/LICENSE
+setuptools/_vendor/packaging-24.2.dist-info/LICENSE.APACHE
+setuptools/_vendor/packaging-24.2.dist-info/LICENSE.BSD
+setuptools/_vendor/packaging-24.2.dist-info/METADATA
+setuptools/_vendor/packaging-24.2.dist-info/RECORD
+setuptools/_vendor/packaging-24.2.dist-info/REQUESTED
+setuptools/_vendor/packaging-24.2.dist-info/WHEEL
+setuptools/_vendor/packaging/licenses/__init__.py
+setuptools/_vendor/packaging/licenses/_spdx.py
+setuptools/_vendor/platformdirs/__init__.py
+setuptools/_vendor/platformdirs/__main__.py
+setuptools/_vendor/platformdirs/android.py
+setuptools/_vendor/platformdirs/api.py
+setuptools/_vendor/platformdirs/macos.py
+setuptools/_vendor/platformdirs/py.typed
+setuptools/_vendor/platformdirs/unix.py
+setuptools/_vendor/platformdirs/version.py
+setuptools/_vendor/platformdirs/windows.py
+setuptools/_vendor/platformdirs-4.2.2.dist-info/INSTALLER
+setuptools/_vendor/platformdirs-4.2.2.dist-info/METADATA
+setuptools/_vendor/platformdirs-4.2.2.dist-info/RECORD
+setuptools/_vendor/platformdirs-4.2.2.dist-info/REQUESTED
+setuptools/_vendor/platformdirs-4.2.2.dist-info/WHEEL
+setuptools/_vendor/platformdirs-4.2.2.dist-info/licenses/LICENSE
+setuptools/_vendor/tomli/__init__.py
+setuptools/_vendor/tomli/_parser.py
+setuptools/_vendor/tomli/_re.py
+setuptools/_vendor/tomli/_types.py
+setuptools/_vendor/tomli/py.typed
+setuptools/_vendor/tomli-2.0.1.dist-info/INSTALLER
+setuptools/_vendor/tomli-2.0.1.dist-info/LICENSE
+setuptools/_vendor/tomli-2.0.1.dist-info/METADATA
+setuptools/_vendor/tomli-2.0.1.dist-info/RECORD
+setuptools/_vendor/tomli-2.0.1.dist-info/REQUESTED
+setuptools/_vendor/tomli-2.0.1.dist-info/WHEEL
+setuptools/_vendor/typeguard/__init__.py
+setuptools/_vendor/typeguard/_checkers.py
+setuptools/_vendor/typeguard/_config.py
+setuptools/_vendor/typeguard/_decorators.py
+setuptools/_vendor/typeguard/_exceptions.py
+setuptools/_vendor/typeguard/_functions.py
+setuptools/_vendor/typeguard/_importhook.py
+setuptools/_vendor/typeguard/_memo.py
+setuptools/_vendor/typeguard/_pytest_plugin.py
+setuptools/_vendor/typeguard/_suppression.py
+setuptools/_vendor/typeguard/_transformer.py
+setuptools/_vendor/typeguard/_union_transformer.py
+setuptools/_vendor/typeguard/_utils.py
+setuptools/_vendor/typeguard/py.typed
+setuptools/_vendor/typeguard-4.3.0.dist-info/INSTALLER
+setuptools/_vendor/typeguard-4.3.0.dist-info/LICENSE
+setuptools/_vendor/typeguard-4.3.0.dist-info/METADATA
+setuptools/_vendor/typeguard-4.3.0.dist-info/RECORD
+setuptools/_vendor/typeguard-4.3.0.dist-info/WHEEL
+setuptools/_vendor/typeguard-4.3.0.dist-info/entry_points.txt
+setuptools/_vendor/typeguard-4.3.0.dist-info/top_level.txt
+setuptools/_vendor/typing_extensions-4.12.2.dist-info/INSTALLER
+setuptools/_vendor/typing_extensions-4.12.2.dist-info/LICENSE
+setuptools/_vendor/typing_extensions-4.12.2.dist-info/METADATA
+setuptools/_vendor/typing_extensions-4.12.2.dist-info/RECORD
+setuptools/_vendor/typing_extensions-4.12.2.dist-info/WHEEL
+setuptools/_vendor/wheel/__init__.py
+setuptools/_vendor/wheel/__main__.py
+setuptools/_vendor/wheel/_setuptools_logging.py
+setuptools/_vendor/wheel/bdist_wheel.py
+setuptools/_vendor/wheel/macosx_libfile.py
+setuptools/_vendor/wheel/metadata.py
+setuptools/_vendor/wheel/util.py
+setuptools/_vendor/wheel/wheelfile.py
+setuptools/_vendor/wheel-0.43.0.dist-info/INSTALLER
+setuptools/_vendor/wheel-0.43.0.dist-info/LICENSE.txt
+setuptools/_vendor/wheel-0.43.0.dist-info/METADATA
+setuptools/_vendor/wheel-0.43.0.dist-info/RECORD
+setuptools/_vendor/wheel-0.43.0.dist-info/REQUESTED
+setuptools/_vendor/wheel-0.43.0.dist-info/WHEEL
+setuptools/_vendor/wheel-0.43.0.dist-info/entry_points.txt
+setuptools/_vendor/wheel/cli/__init__.py
+setuptools/_vendor/wheel/cli/convert.py
+setuptools/_vendor/wheel/cli/pack.py
+setuptools/_vendor/wheel/cli/tags.py
+setuptools/_vendor/wheel/cli/unpack.py
+setuptools/_vendor/wheel/vendored/__init__.py
+setuptools/_vendor/wheel/vendored/vendor.txt
+setuptools/_vendor/wheel/vendored/packaging/__init__.py
+setuptools/_vendor/wheel/vendored/packaging/_elffile.py
+setuptools/_vendor/wheel/vendored/packaging/_manylinux.py
+setuptools/_vendor/wheel/vendored/packaging/_musllinux.py
+setuptools/_vendor/wheel/vendored/packaging/_parser.py
+setuptools/_vendor/wheel/vendored/packaging/_structures.py
+setuptools/_vendor/wheel/vendored/packaging/_tokenizer.py
+setuptools/_vendor/wheel/vendored/packaging/markers.py
+setuptools/_vendor/wheel/vendored/packaging/requirements.py
+setuptools/_vendor/wheel/vendored/packaging/specifiers.py
+setuptools/_vendor/wheel/vendored/packaging/tags.py
+setuptools/_vendor/wheel/vendored/packaging/utils.py
+setuptools/_vendor/wheel/vendored/packaging/version.py
+setuptools/_vendor/zipp/__init__.py
+setuptools/_vendor/zipp/glob.py
+setuptools/_vendor/zipp-3.19.2.dist-info/INSTALLER
+setuptools/_vendor/zipp-3.19.2.dist-info/LICENSE
+setuptools/_vendor/zipp-3.19.2.dist-info/METADATA
+setuptools/_vendor/zipp-3.19.2.dist-info/RECORD
+setuptools/_vendor/zipp-3.19.2.dist-info/REQUESTED
+setuptools/_vendor/zipp-3.19.2.dist-info/WHEEL
+setuptools/_vendor/zipp-3.19.2.dist-info/top_level.txt
+setuptools/_vendor/zipp/compat/__init__.py
+setuptools/_vendor/zipp/compat/py310.py
+setuptools/command/__init__.py
+setuptools/command/_requirestxt.py
+setuptools/command/alias.py
+setuptools/command/bdist_egg.py
+setuptools/command/bdist_rpm.py
+setuptools/command/bdist_wheel.py
+setuptools/command/build.py
+setuptools/command/build_clib.py
+setuptools/command/build_ext.py
+setuptools/command/build_py.py
+setuptools/command/develop.py
+setuptools/command/dist_info.py
+setuptools/command/easy_install.py
+setuptools/command/editable_wheel.py
+setuptools/command/egg_info.py
+setuptools/command/install.py
+setuptools/command/install_egg_info.py
+setuptools/command/install_lib.py
+setuptools/command/install_scripts.py
+setuptools/command/launcher manifest.xml
+setuptools/command/rotate.py
+setuptools/command/saveopts.py
+setuptools/command/sdist.py
+setuptools/command/setopt.py
+setuptools/command/test.py
+setuptools/compat/__init__.py
+setuptools/compat/py310.py
+setuptools/compat/py311.py
+setuptools/compat/py312.py
+setuptools/compat/py39.py
+setuptools/config/NOTICE
+setuptools/config/__init__.py
+setuptools/config/_apply_pyprojecttoml.py
+setuptools/config/distutils.schema.json
+setuptools/config/expand.py
+setuptools/config/pyprojecttoml.py
+setuptools/config/setupcfg.py
+setuptools/config/setuptools.schema.json
+setuptools/config/_validate_pyproject/NOTICE
+setuptools/config/_validate_pyproject/__init__.py
+setuptools/config/_validate_pyproject/error_reporting.py
+setuptools/config/_validate_pyproject/extra_validations.py
+setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py
+setuptools/config/_validate_pyproject/fastjsonschema_validations.py
+setuptools/config/_validate_pyproject/formats.py
+setuptools/tests/__init__.py
+setuptools/tests/contexts.py
+setuptools/tests/environment.py
+setuptools/tests/fixtures.py
+setuptools/tests/mod_with_constant.py
+setuptools/tests/namespaces.py
+setuptools/tests/script-with-bom.py
+setuptools/tests/server.py
+setuptools/tests/test_archive_util.py
+setuptools/tests/test_bdist_deprecations.py
+setuptools/tests/test_bdist_egg.py
+setuptools/tests/test_bdist_wheel.py
+setuptools/tests/test_build.py
+setuptools/tests/test_build_clib.py
+setuptools/tests/test_build_ext.py
+setuptools/tests/test_build_meta.py
+setuptools/tests/test_build_py.py
+setuptools/tests/test_config_discovery.py
+setuptools/tests/test_core_metadata.py
+setuptools/tests/test_depends.py
+setuptools/tests/test_develop.py
+setuptools/tests/test_dist.py
+setuptools/tests/test_dist_info.py
+setuptools/tests/test_distutils_adoption.py
+setuptools/tests/test_easy_install.py
+setuptools/tests/test_editable_install.py
+setuptools/tests/test_egg_info.py
+setuptools/tests/test_extern.py
+setuptools/tests/test_find_packages.py
+setuptools/tests/test_find_py_modules.py
+setuptools/tests/test_glob.py
+setuptools/tests/test_install_scripts.py
+setuptools/tests/test_logging.py
+setuptools/tests/test_manifest.py
+setuptools/tests/test_namespaces.py
+setuptools/tests/test_packageindex.py
+setuptools/tests/test_sandbox.py
+setuptools/tests/test_sdist.py
+setuptools/tests/test_setopt.py
+setuptools/tests/test_setuptools.py
+setuptools/tests/test_shutil_wrapper.py
+setuptools/tests/test_unicode_utils.py
+setuptools/tests/test_virtualenv.py
+setuptools/tests/test_warnings.py
+setuptools/tests/test_wheel.py
+setuptools/tests/test_windows_wrappers.py
+setuptools/tests/text.py
+setuptools/tests/textwrap.py
+setuptools/tests/compat/__init__.py
+setuptools/tests/compat/py39.py
+setuptools/tests/config/__init__.py
+setuptools/tests/config/setupcfg_examples.txt
+setuptools/tests/config/test_apply_pyprojecttoml.py
+setuptools/tests/config/test_expand.py
+setuptools/tests/config/test_pyprojecttoml.py
+setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py
+setuptools/tests/config/test_setupcfg.py
+setuptools/tests/config/downloads/__init__.py
+setuptools/tests/config/downloads/preload.py
+setuptools/tests/indexes/test_links_priority/external.html
+setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
+setuptools/tests/integration/__init__.py
+setuptools/tests/integration/helpers.py
+setuptools/tests/integration/test_pip_install_sdist.py
+tools/build_launchers.py
+tools/finalize.py
+tools/generate_validation_code.py
+tools/vendored.py
\ No newline at end of file
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/dependency_links.txt b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/entry_points.txt b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0db0a6c8f1b8d9c0ad4a25db6892e29f8988fcf2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/entry_points.txt
@@ -0,0 +1,51 @@
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+bdist_wheel = setuptools.command.bdist_wheel:bdist_wheel
+build = setuptools.command.build:build
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+editable_wheel = setuptools.command.editable_wheel:editable_wheel
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+
+[distutils.setup_keywords]
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:invalid_unless_false
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.finalize_distribution_options]
+keywords = setuptools.dist:Distribution._finalize_setup_keywords
+parent_finalize = setuptools.dist:_Distribution.finalize_options
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4d40327a3cb30ab8ec29d99bb3f8a785174ae689
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt
@@ -0,0 +1,85 @@
+
+[certs]
+
+[check]
+pytest-checkdocs>=2.4
+
+[check:sys_platform != "cygwin"]
+pytest-ruff>=0.2.1
+ruff>=0.8.0
+
+[core]
+packaging>=24.2
+more_itertools>=8.8
+jaraco.text>=3.7
+wheel>=0.43.0
+platformdirs>=4.2.2
+jaraco.collections
+jaraco.functools>=4
+packaging
+more_itertools
+
+[core:python_version < "3.10"]
+importlib_metadata>=6
+
+[core:python_version < "3.11"]
+tomli>=2.0.1
+
+[cover]
+pytest-cov
+
+[doc]
+sphinx>=3.5
+jaraco.packaging>=9.3
+rst.linker>=1.9
+furo
+sphinx-lint
+jaraco.tidelift>=1.4
+pygments-github-lexers==0.0.5
+sphinx-favicon
+sphinx-inline-tabs
+sphinx-reredirects
+sphinxcontrib-towncrier
+sphinx-notfound-page<2,>=1
+pyproject-hooks!=1.1
+towncrier<24.7
+
+[enabler]
+pytest-enabler>=2.2
+
+[ssl]
+
+[test]
+pytest!=8.1.*,>=6
+virtualenv>=13.0.0
+wheel>=0.44.0
+pip>=19.1
+packaging>=24.2
+jaraco.envs>=2.2
+pytest-xdist>=3
+jaraco.path>=3.7.2
+build[virtualenv]>=1.0.3
+filelock>=3.4.0
+ini2toml[lite]>=0.14
+tomli-w>=1.0.0
+pytest-timeout
+pytest-home>=0.5
+pytest-subprocess
+pyproject-hooks!=1.1
+jaraco.test>=5.5
+
+[test:python_version >= "3.9" and sys_platform != "cygwin"]
+jaraco.develop>=7.21
+
+[test:sys_platform != "cygwin"]
+pytest-perf
+
+[type]
+pytest-mypy
+mypy==1.14.*
+
+[type:python_version < "3.10"]
+importlib_metadata>=7.0.2
+
+[type:sys_platform != "cygwin"]
+jaraco.develop>=7.21
diff --git a/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5ac1070294b478b7cc2ce677207ee08813bfa37
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+_distutils_hack
+pkg_resources
+setuptools
diff --git a/parrot/lib/python3.10/site-packages/shellingham/__init__.py b/parrot/lib/python3.10/site-packages/shellingham/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15f7a90cbd02e5c2cc933cf6aa0374cca68035f1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/__init__.py
@@ -0,0 +1,23 @@
+import importlib
+import os
+
+from ._core import ShellDetectionFailure
+
+__version__ = "1.5.4"
+
+
+def detect_shell(pid=None, max_depth=10):
+ name = os.name
+ try:
+ impl = importlib.import_module(".{}".format(name), __name__)
+ except ImportError:
+ message = "Shell detection not implemented for {0!r}".format(name)
+ raise RuntimeError(message)
+ try:
+ get_shell = impl.get_shell
+ except AttributeError:
+ raise RuntimeError("get_shell not implemented for {0!r}".format(name))
+ shell = get_shell(pid, max_depth=max_depth)
+ if shell:
+ return shell
+ raise ShellDetectionFailure()
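+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Editorial sketch, not part of upstream shellingham: typical usage is to
+    # call detect_shell() and fall back to the SHELL environment variable (a
+    # heuristic of our own, not upstream behavior) when detection fails.
+    try:
+        shell_name, shell_path = detect_shell()
+        print("Detected shell:", shell_name, "at", shell_path)
+    except (RuntimeError, ShellDetectionFailure):
+        print("Detection failed; $SHELL says:", os.environ.get("SHELL", "<unset>"))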
diff --git a/parrot/lib/python3.10/site-packages/shellingham/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a64d27d8040b6861ac1115601435de900598dda2
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/__pycache__/_core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abf2194cd7063a4ab61f80d30e1d5a2e16e9a0ed
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/_core.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/__pycache__/nt.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/nt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d24355b1906c2a7d990e872dbafad258661371d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/__pycache__/nt.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/_core.py b/parrot/lib/python3.10/site-packages/shellingham/_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b65417c733b54e48b120e37f573c2baa6ef72b
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/_core.py
@@ -0,0 +1,11 @@
+SHELL_NAMES = (
+ {"sh", "bash", "dash", "ash"} # Bourne.
+ | {"csh", "tcsh"} # C.
+ | {"ksh", "zsh", "fish"} # Common alternatives.
+ | {"cmd", "powershell", "pwsh"} # Microsoft.
+ | {"elvish", "xonsh", "nu"} # More exotic.
+)
+
+
+class ShellDetectionFailure(EnvironmentError):
+ pass
diff --git a/parrot/lib/python3.10/site-packages/shellingham/nt.py b/parrot/lib/python3.10/site-packages/shellingham/nt.py
new file mode 100644
index 0000000000000000000000000000000000000000..389551b223a761fa2f97e929b60bf3ca5baed94c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/nt.py
@@ -0,0 +1,163 @@
+import contextlib
+import ctypes
+import os
+
+from ctypes.wintypes import (
+ BOOL,
+ CHAR,
+ DWORD,
+ HANDLE,
+ LONG,
+ LPWSTR,
+ MAX_PATH,
+ PDWORD,
+ ULONG,
+)
+
+from shellingham._core import SHELL_NAMES
+
+
+INVALID_HANDLE_VALUE = HANDLE(-1).value
+ERROR_NO_MORE_FILES = 18
+ERROR_INSUFFICIENT_BUFFER = 122
+TH32CS_SNAPPROCESS = 2
+PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
+
+
+kernel32 = ctypes.windll.kernel32
+
+
+def _check_handle(error_val=0):
+ def check(ret, func, args):
+ if ret == error_val:
+ raise ctypes.WinError()
+ return ret
+
+ return check
+
+
+def _check_expected(expected):
+ def check(ret, func, args):
+ if ret:
+ return True
+ code = ctypes.GetLastError()
+ if code == expected:
+ return False
+ raise ctypes.WinError(code)
+
+ return check
+
+
+class ProcessEntry32(ctypes.Structure):
+ _fields_ = (
+ ("dwSize", DWORD),
+ ("cntUsage", DWORD),
+ ("th32ProcessID", DWORD),
+ ("th32DefaultHeapID", ctypes.POINTER(ULONG)),
+ ("th32ModuleID", DWORD),
+ ("cntThreads", DWORD),
+ ("th32ParentProcessID", DWORD),
+ ("pcPriClassBase", LONG),
+ ("dwFlags", DWORD),
+ ("szExeFile", CHAR * MAX_PATH),
+ )
+
+
+kernel32.CloseHandle.argtypes = [HANDLE]
+kernel32.CloseHandle.restype = BOOL
+
+kernel32.CreateToolhelp32Snapshot.argtypes = [DWORD, DWORD]
+kernel32.CreateToolhelp32Snapshot.restype = HANDLE
+kernel32.CreateToolhelp32Snapshot.errcheck = _check_handle( # type: ignore
+ INVALID_HANDLE_VALUE,
+)
+
+kernel32.Process32First.argtypes = [HANDLE, ctypes.POINTER(ProcessEntry32)]
+kernel32.Process32First.restype = BOOL
+kernel32.Process32First.errcheck = _check_expected( # type: ignore
+ ERROR_NO_MORE_FILES,
+)
+
+kernel32.Process32Next.argtypes = [HANDLE, ctypes.POINTER(ProcessEntry32)]
+kernel32.Process32Next.restype = BOOL
+kernel32.Process32Next.errcheck = _check_expected( # type: ignore
+ ERROR_NO_MORE_FILES,
+)
+
+kernel32.GetCurrentProcessId.argtypes = []
+kernel32.GetCurrentProcessId.restype = DWORD
+
+kernel32.OpenProcess.argtypes = [DWORD, BOOL, DWORD]
+kernel32.OpenProcess.restype = HANDLE
+kernel32.OpenProcess.errcheck = _check_handle( # type: ignore
+ INVALID_HANDLE_VALUE,
+)
+
+kernel32.QueryFullProcessImageNameW.argtypes = [HANDLE, DWORD, LPWSTR, PDWORD]
+kernel32.QueryFullProcessImageNameW.restype = BOOL
+kernel32.QueryFullProcessImageNameW.errcheck = _check_expected( # type: ignore
+ ERROR_INSUFFICIENT_BUFFER,
+)
+
+
+@contextlib.contextmanager
+def _handle(f, *args, **kwargs):
+ handle = f(*args, **kwargs)
+ try:
+ yield handle
+ finally:
+ kernel32.CloseHandle(handle)
+
+
+def _iter_processes():
+ f = kernel32.CreateToolhelp32Snapshot
+ with _handle(f, TH32CS_SNAPPROCESS, 0) as snap:
+ entry = ProcessEntry32()
+ entry.dwSize = ctypes.sizeof(entry)
+ ret = kernel32.Process32First(snap, entry)
+ while ret:
+ yield entry
+ ret = kernel32.Process32Next(snap, entry)
+
+
+def _get_full_path(proch):
+ size = DWORD(MAX_PATH)
+ while True:
+ path_buff = ctypes.create_unicode_buffer("", size.value)
+ if kernel32.QueryFullProcessImageNameW(proch, 0, path_buff, size):
+ return path_buff.value
+ size.value *= 2
+
+
+def get_shell(pid=None, max_depth=10):
+ proc_map = {
+ proc.th32ProcessID: (proc.th32ParentProcessID, proc.szExeFile)
+ for proc in _iter_processes()
+ }
+ pid = pid or os.getpid()
+
+ for _ in range(0, max_depth + 1):
+ try:
+ ppid, executable = proc_map[pid]
+ except KeyError: # No such process? Give up.
+ break
+
+ # The executable name would be encoded with the current code page if
+ # we're in ANSI mode (usually). Try to decode it into str/unicode,
+        # replacing invalid characters to be safe (not theoretically necessary,
+ # I think). Note that we need to use 'mbcs' instead of encoding
+ # settings from sys because this is from the Windows API, not Python
+ # internals (which those settings reflect). (pypa/pipenv#3382)
+ if isinstance(executable, bytes):
+ executable = executable.decode("mbcs", "replace")
+
+ name = executable.rpartition(".")[0].lower()
+ if name not in SHELL_NAMES:
+ pid = ppid
+ continue
+
+ key = PROCESS_QUERY_LIMITED_INFORMATION
+ with _handle(kernel32.OpenProcess, key, 0, pid) as proch:
+ return (name, _get_full_path(proch))
+
+ return None
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/__init__.py b/parrot/lib/python3.10/site-packages/shellingham/posix/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bd2070db27189e62a1867e4de49f16f8c8841ff
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/posix/__init__.py
@@ -0,0 +1,112 @@
+import os
+import re
+
+from .._core import SHELL_NAMES, ShellDetectionFailure
+from . import proc, ps
+
+# Based on QEMU docs: https://www.qemu.org/docs/master/user/main.html
+QEMU_BIN_REGEX = re.compile(
+ r"""qemu-
+ (alpha
+ |armeb
+ |arm
+ |m68k
+ |cris
+ |i386
+ |x86_64
+ |microblaze
+ |mips
+ |mipsel
+ |mips64
+ |mips64el
+ |mipsn32
+ |mipsn32el
+ |nios2
+ |ppc64
+ |ppc
+ |sh4eb
+ |sh4
+ |sparc
+ |sparc32plus
+ |sparc64
+ )""",
+ re.VERBOSE,
+)
+
+
+def _iter_process_parents(pid, max_depth=10):
+ """Select a way to obtain process information from the system.
+
+ * `/proc` is used if supported.
+ * The system `ps` utility is used as a fallback option.
+ """
+ for impl in (proc, ps):
+ try:
+ iterator = impl.iter_process_parents(pid, max_depth)
+ except EnvironmentError:
+ continue
+ return iterator
+ raise ShellDetectionFailure("compatible proc fs or ps utility is required")
+
+
+def _get_login_shell(proc_cmd):
+ """Form shell information from SHELL environ if possible."""
+ login_shell = os.environ.get("SHELL", "")
+ if login_shell:
+ proc_cmd = login_shell
+ else:
+ proc_cmd = proc_cmd[1:]
+ return (os.path.basename(proc_cmd).lower(), proc_cmd)
+
+
+_INTERPRETER_SHELL_NAMES = [
+ (re.compile(r"^python(\d+(\.\d+)?)?$"), {"xonsh"}),
+]
+
+
+def _get_interpreter_shell(proc_name, proc_args):
+ """Get shell invoked via an interpreter.
+
+    Some shells are implemented on, and invoked with, an interpreter, e.g.
+    xonsh is commonly executed with an executable Python script. This detects
+    what script the interpreter is actually running, and checks whether that
+    looks like a shell.
+
+    See sarugaku/shellingham#26 for the rationale.
+ """
+ for pattern, shell_names in _INTERPRETER_SHELL_NAMES:
+ if not pattern.match(proc_name):
+ continue
+ for arg in proc_args:
+ name = os.path.basename(arg).lower()
+ if os.path.isfile(arg) and name in shell_names:
+ return (name, arg)
+ return None
+
+
+def _get_shell(cmd, *args):
+ if cmd.startswith("-"): # Login shell! Let's use this.
+ return _get_login_shell(cmd)
+ name = os.path.basename(cmd).lower()
+ if name == "rosetta" or QEMU_BIN_REGEX.fullmatch(name):
+ # If the current process is Rosetta or QEMU, this likely is a
+ # containerized process. Parse out the actual command instead.
+ cmd = args[0]
+ args = args[1:]
+ name = os.path.basename(cmd).lower()
+ if name in SHELL_NAMES: # Command looks like a shell.
+ return (name, cmd)
+ shell = _get_interpreter_shell(name, args)
+ if shell:
+ return shell
+ return None
+
+
+def get_shell(pid=None, max_depth=10):
+ """Get the shell that the supplied pid or os.getpid() is running in."""
+ pid = str(pid or os.getpid())
+ for proc_args, _, _ in _iter_process_parents(pid, max_depth):
+ shell = _get_shell(*proc_args)
+ if shell:
+ return shell
+ return None
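+
+
+# Editorial note, not part of upstream shellingham: QEMU_BIN_REGEX only
+# fullmatches user-mode emulator names, so a wrapped shell such as
+# "qemu-x86_64 /bin/bash" is unwrapped by _get_shell, while an unrelated
+# binary name is not mistaken for an emulator:
+#     >>> bool(QEMU_BIN_REGEX.fullmatch("qemu-x86_64"))
+#     True
+#     >>> bool(QEMU_BIN_REGEX.fullmatch("qemu-kvm"))
+#     False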
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..288260fb845f0c942f760cb8e99bff62aa2a91ba
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/_core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9aef175c80ef1f1426ac5e96df4c8aeacdbf97ac
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/_core.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/proc.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/proc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dee709b4aef97dd63679614a0ba1114bedd31df3
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/proc.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/ps.cpython-310.pyc b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/ps.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64a9323d3941d4891d3e09589d2ecfb759bf69f0
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/shellingham/posix/__pycache__/ps.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/_core.py b/parrot/lib/python3.10/site-packages/shellingham/posix/_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..adc49e6e7a9d3edf062c55e0078136899f78d30d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/posix/_core.py
@@ -0,0 +1,3 @@
+import collections
+
+Process = collections.namedtuple("Process", "args pid ppid")
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/proc.py b/parrot/lib/python3.10/site-packages/shellingham/posix/proc.py
new file mode 100644
index 0000000000000000000000000000000000000000..950f63228e5b328f82b70da8851ec60c6a2ff029
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/posix/proc.py
@@ -0,0 +1,83 @@
+import io
+import os
+import re
+import sys
+
+from ._core import Process
+
+# FreeBSD: https://www.freebsd.org/cgi/man.cgi?query=procfs
+# NetBSD: https://man.netbsd.org/NetBSD-9.3-STABLE/mount_procfs.8
+# DragonFlyBSD: https://www.dragonflybsd.org/cgi/web-man?command=procfs
+BSD_STAT_PPID = 2
+
+# See https://docs.kernel.org/filesystems/proc.html
+LINUX_STAT_PPID = 3
+
+STAT_PATTERN = re.compile(r"\(.+\)|\S+")
+
+
+def detect_proc():
+ """Detect /proc filesystem style.
+
+ This checks the /proc/{pid} directory for possible formats. Returns one of
+ the following as str:
+
+ * `stat`: Linux-style, i.e. ``/proc/{pid}/stat``.
+ * `status`: BSD-style, i.e. ``/proc/{pid}/status``.
+ """
+ pid = os.getpid()
+ for name in ("stat", "status"):
+ if os.path.exists(os.path.join("/proc", str(pid), name)):
+ return name
+ raise ProcFormatError("unsupported proc format")
+
+
+def _use_bsd_stat_format():
+ try:
+ return os.uname().sysname.lower() in ("freebsd", "netbsd", "dragonfly")
+ except Exception:
+ return False
+
+
+def _get_ppid(pid, name):
+ path = os.path.join("/proc", str(pid), name)
+ with io.open(path, encoding="ascii", errors="replace") as f:
+ parts = STAT_PATTERN.findall(f.read())
+        # We only care about the PPID -- a single numeric field.
+ if _use_bsd_stat_format():
+ return parts[BSD_STAT_PPID]
+ return parts[LINUX_STAT_PPID]
+
+
+def _get_cmdline(pid):
+ path = os.path.join("/proc", str(pid), "cmdline")
+ encoding = sys.getfilesystemencoding() or "utf-8"
+ with io.open(path, encoding=encoding, errors="replace") as f:
+ # XXX: Command line arguments can be arbitrary byte sequences, not
+ # necessarily decodable. For Shellingham's purpose, however, we don't
+ # care. (pypa/pipenv#2820)
+ # cmdline appends an extra NULL at the end, hence the [:-1].
+ return tuple(f.read().split("\0")[:-1])
+
+
+class ProcFormatError(EnvironmentError):
+ pass
+
+
+def iter_process_parents(pid, max_depth=10):
+ """Try to look up the process tree via the /proc interface."""
+ stat_name = detect_proc()
+
+    # Inner generator function so we correctly throw an error eagerly if proc
+    # is not supported, rather than on the first call to the iterator. This
+    # lets the call site detect the correct implementation.
+ def _iter_process_parents(pid, max_depth):
+ for _ in range(max_depth):
+ ppid = _get_ppid(pid, stat_name)
+ args = _get_cmdline(pid)
+ yield Process(args=args, pid=pid, ppid=ppid)
+ if ppid == "0":
+ break
+ pid = ppid
+
+ return _iter_process_parents(pid, max_depth)
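+
+
+# Editorial sketch, not part of upstream shellingham: STAT_PATTERN keeps the
+# parenthesised comm field as one token, so a command name containing spaces
+# does not shift the PPID column:
+#     >>> STAT_PATTERN.findall("1234 (tmux: server) S 1 ...")
+#     ['1234', '(tmux: server)', 'S', '1', '...']
+# parts[LINUX_STAT_PPID] (index 3) is then '1', the parent PID.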
diff --git a/parrot/lib/python3.10/site-packages/shellingham/posix/ps.py b/parrot/lib/python3.10/site-packages/shellingham/posix/ps.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bc39a74a56390c263e63bfead028f6bce4df3cb
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/shellingham/posix/ps.py
@@ -0,0 +1,51 @@
+import errno
+import subprocess
+import sys
+
+from ._core import Process
+
+
+class PsNotAvailable(EnvironmentError):
+ pass
+
+
+def iter_process_parents(pid, max_depth=10):
+ """Try to look up the process tree via the output of `ps`."""
+ try:
+ cmd = ["ps", "-ww", "-o", "pid=", "-o", "ppid=", "-o", "args="]
+ output = subprocess.check_output(cmd)
+ except OSError as e: # Python 2-compatible FileNotFoundError.
+ if e.errno != errno.ENOENT:
+ raise
+ raise PsNotAvailable("ps not found")
+ except subprocess.CalledProcessError as e:
+ # `ps` can return 1 if the process list is completely empty.
+ # (sarugaku/shellingham#15)
+ if not e.output.strip():
+ return
+ raise
+ if not isinstance(output, str):
+ encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+ output = output.decode(encoding)
+
+ processes_mapping = {}
+ for line in output.split("\n"):
+ try:
+ _pid, ppid, args = line.strip().split(None, 2)
+ # XXX: This is not right, but we are really out of options.
+ # ps does not offer a sane way to decode the argument display,
+ # and this is "Good Enough" for obtaining shell names. Hopefully
+ # people don't name their shell with a space, or have something
+ # like "/usr/bin/xonsh is uber". (sarugaku/shellingham#14)
+ args = tuple(a.strip() for a in args.split(" "))
+ except ValueError:
+ continue
+ processes_mapping[_pid] = Process(args=args, pid=_pid, ppid=ppid)
+
+ for _ in range(max_depth):
+ try:
+ process = processes_mapping[pid]
+ except KeyError:
+ return
+ yield process
+ pid = process.ppid
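+
+
+# Editorial sketch, not part of upstream shellingham: the ps invocation above
+# emits three header-less columns, e.g.
+#
+#     4242  4000 /bin/zsh -il
+#
+# line.strip().split(None, 2) then yields ['4242', '4000', '/bin/zsh -il']:
+# the pid, the ppid, and the raw argument string that is split on spaces.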
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/LICENSE b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..2a3f2a4ab0a044859f260e7961889deaa7144b4c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Fabrice Normandin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/METADATA b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..0e001c67923d354ad655f6a8cfdf51198de8830c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/METADATA
@@ -0,0 +1,163 @@
+Metadata-Version: 2.1
+Name: simple-parsing
+Version: 0.1.6
+Summary: A small utility for simplifying and cleaning up argument parsing scripts.
+License: MIT
+Author: Fabrice Normandin
+Author-email: fabrice.normandin@gmail.com
+Requires-Python: >=3.8,<4.0
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Provides-Extra: toml
+Provides-Extra: yaml
+Requires-Dist: docstring-parser (>=0.15,<1.0)
+Requires-Dist: pyyaml (>=6.0.1,<7.0.0) ; extra == "yaml"
+Requires-Dist: tomli (>=2.0.1,<3.0.0) ; extra == "toml"
+Requires-Dist: tomli-w (>=1.0.0,<2.0.0) ; extra == "toml"
+Requires-Dist: typing-extensions (>=4.5.0)
+Description-Content-Type: text/markdown
+
+[![PyPI version](https://badge.fury.io/py/simple-parsing.svg)](https://badge.fury.io/py/simple-parsing)
+
+# Simple, Elegant, Typed Argument Parsing
+
+`simple-parsing` allows you to transform your ugly `argparse` scripts into beautifully structured, strongly typed little works of art. This isn't a fancy, complicated new command-line tool either; ***this simply adds new features to plain-old argparse!***
+Using [dataclasses](https://docs.python.org/3.7/library/dataclasses.html), `simple-parsing` makes it easier to share and reuse command-line arguments - ***no more copy pasting!***
+
+Supports inheritance, **nesting**, easy serialization to json/yaml, automatic help strings from comments, and much more!
+
+```python
+# examples/demo.py
+from dataclasses import dataclass
+from simple_parsing import ArgumentParser
+
+parser = ArgumentParser()
+parser.add_argument("--foo", type=int, default=123, help="foo help")
+
+@dataclass
+class Options:
+ """ Help string for this group of command-line arguments """
+ log_dir: str # Help string for a required str argument
+ learning_rate: float = 1e-4 # Help string for a float argument
+
+parser.add_arguments(Options, dest="options")
+
+args = parser.parse_args()
+print("foo:", args.foo)
+print("options:", args.options)
+```
+
+```console
+$ python examples/demo.py --log_dir logs --foo 123
+foo: 123
+options: Options(log_dir='logs', learning_rate=0.0001)
+```
+
+```console
+$ python examples/demo.py --help
+usage: demo.py [-h] [--foo int] --log_dir str [--learning_rate float]
+
+optional arguments:
+ -h, --help show this help message and exit
+ --foo int foo help (default: 123)
+
+Options ['options']:
+ Help string for this group of command-line arguments
+
+ --log_dir str Help string for a required str argument (default:
+ None)
+ --learning_rate float
+ Help string for a float argument (default: 0.0001)
+```
+
+### (*new*) Simplified API:
+
+For a simple use-case, where you only want to parse a single dataclass, you can use the `simple_parsing.parse` or `simple_parsing.parse_known_args` functions:
+
+```python
+options: Options = simple_parsing.parse(Options)
+# or:
+options, leftover_args = simple_parsing.parse_known_args(Options)
+```
+
+## Installation
+
+`pip install simple-parsing`
+
+## [Examples](https://github.com/lebrice/SimpleParsing/tree/master/examples/README.md)
+
+## [API Documentation](https://github.com/lebrice/SimpleParsing/tree/master/docs/README.md) (Under construction)
+
+## Features
+
+- ### [Automatic "--help" strings](https://github.com/lebrice/SimpleParsing/tree/master/examples/docstrings/README.md)
+
+ As developers, we want to make it easy for people coming into our projects to understand how to run them. However, a user-friendly `--help` message is often hard to write and to maintain, especially as the number of arguments increases.
+
+ With `simple-parsing`, your arguments and their descriptions are defined in the same place, making your code easier to read, write, and maintain.
+
+- ### Modular, Reusable, Cleanly Grouped Arguments
+
+ *(no more copy-pasting)*
+
+ When you need to add a new group of command-line arguments similar to an existing one, instead of copy-pasting a block of `argparse` code and renaming variables, you can reuse your argument class, and let the `ArgumentParser` take care of adding relevant prefixes to the arguments for you:
+
+ ```python
+ parser.add_arguments(Options, dest="train")
+ parser.add_arguments(Options, dest="valid")
+ args = parser.parse_args()
+ train_options: Options = args.train
+ valid_options: Options = args.valid
+ print(train_options)
+ print(valid_options)
+ ```
+
+ ```console
+ $ python examples/demo.py \
+ --train.log_dir "training" \
+ --valid.log_dir "validation"
+ Options(log_dir='training', learning_rate=0.0001)
+ Options(log_dir='validation', learning_rate=0.0001)
+ ```
+
+ These prefixes can also be set explicitly, or not be used at all. For more info, take a look at the [Prefixing Guide](https://github.com/lebrice/SimpleParsing/tree/master/examples/prefixing/README.md)
+
+- ### [Argument subgroups](https://github.com/lebrice/SimpleParsing/tree/master/examples/subgroups/README.md)
+
+  It's easy to choose between different groups of arguments with the
+  `subgroups` function!
+
+- ### [Setting defaults from Configuration files](https://github.com/lebrice/SimpleParsing/tree/master/examples/config_files/README.md)
+
+  Default values for command-line arguments can easily be read from many different formats, including json/yaml! See the sketch below.
+
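+  A minimal sketch of what this could look like (the `config_path` keyword and
+  the `config.yaml` contents here are illustrative assumptions; see the linked
+  guide for the actual API):
+
+  ```python
+  # config.yaml (hypothetical):
+  #   log_dir: logs
+  #   learning_rate: 0.001
+  options = simple_parsing.parse(Options, config_path="config.yaml")
+  ```
+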
+- ### [**Easy serialization**](https://github.com/lebrice/SimpleParsing/tree/master/examples/serialization/README.md):
+
+  Easily save/load configs to `json` or `yaml`!
+
+- ### [**Inheritance**!](https://github.com/lebrice/SimpleParsing/tree/master/examples/inheritance/README.md)
+
+ You can easily customize an existing argument class by extending it and adding your own attributes, which helps promote code reuse across projects. For more info, take a look at the [inheritance example](https://github.com/lebrice/SimpleParsing/tree/master/examples/inheritance/inheritance_example.py)
+
+- ### [**Nesting**!](https://github.com/lebrice/SimpleParsing/tree/master/examples/nesting/README.md):
+
+ Dataclasses can be nested within dataclasses, as deep as you need!
+
+- ### [Easier parsing of lists and tuples](https://github.com/lebrice/SimpleParsing/tree/master/examples/container_types/README.md):
+
+  This is sometimes tricky to do with regular `argparse`, but `simple-parsing` makes it a lot easier by using Python's builtin type annotations to automatically convert the values to the right type for you.
+  As an added feature, by using these type annotations, `simple-parsing` allows you to parse nested lists or tuples, as can be seen in [this example](https://github.com/lebrice/SimpleParsing/tree/master/examples/merging/README.md)
+
+- ### [Enums support](https://github.com/lebrice/SimpleParsing/tree/master/examples/enums/README.md)
+
+- (More to come!)
+
+## Examples:
+
+Additional examples for all the features mentioned above can be found in the [examples folder](https://github.com/lebrice/SimpleParsing/tree/master/examples/README.md)
+
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/RECORD b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..e505072b6f0c226144befcebfcf1726bbd86274c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/RECORD
@@ -0,0 +1,77 @@
+simple_parsing-0.1.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+simple_parsing-0.1.6.dist-info/LICENSE,sha256=iF8lpUzEyV5BW0Ayv_llqKKna-0a13TaaDkU7SnAD78,1074
+simple_parsing-0.1.6.dist-info/METADATA,sha256=tyLNML0F2A2TGibvRdK0fLjZYd1tfq77YlKaHqI6P3E,7265
+simple_parsing-0.1.6.dist-info/RECORD,,
+simple_parsing-0.1.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+simple_parsing-0.1.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+simple_parsing/__init__.py,sha256=wIXUD_zCjaMlgCEv7R6QMmVAJyfjCnDOanAYe_L27YI,1157
+simple_parsing/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/__pycache__/conflicts.cpython-310.pyc,,
+simple_parsing/__pycache__/decorators.cpython-310.pyc,,
+simple_parsing/__pycache__/docstring.cpython-310.pyc,,
+simple_parsing/__pycache__/help_formatter.cpython-310.pyc,,
+simple_parsing/__pycache__/parsing.cpython-310.pyc,,
+simple_parsing/__pycache__/replace.cpython-310.pyc,,
+simple_parsing/__pycache__/utils.cpython-310.pyc,,
+simple_parsing/annotation_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+simple_parsing/annotation_utils/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/annotation_utils/__pycache__/get_field_annotations.cpython-310.pyc,,
+simple_parsing/annotation_utils/get_field_annotations.py,sha256=CPws2qOBfXwxNqjGASvWRwS061F1y8CAcDk93-YhvpI,10620
+simple_parsing/conflicts.py,sha256=7ra6SPxiPHB6ZlrBArd1FCTX-yE7ClhyTCaUO8StWeE,17163
+simple_parsing/decorators.py,sha256=6ioKbJYDSbB_GclGmPUiJTD3RskNMVo48ja-rvinUJw,5328
+simple_parsing/docstring.py,sha256=TgvaOcfbvbrq5iDmYX7pGa7Nm7vySN4e23sJRcmiX4E,13770
+simple_parsing/help_formatter.py,sha256=zRrGDNr7ldEleywLxagpIsrILqURvz1KJh-ivRGui0w,3478
+simple_parsing/helpers/__init__.py,sha256=NNSLnxoausqxjzKm-tI-Bp6pr_5nwicUw6mLH8xXXaQ,489
+simple_parsing/helpers/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/custom_actions.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/fields.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/flatten.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/nested_partial.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/partial.cpython-310.pyc,,
+simple_parsing/helpers/__pycache__/subgroups.cpython-310.pyc,,
+simple_parsing/helpers/custom_actions.py,sha256=QtMLRCGQ1pxXrGUbN6rz-P7Md_9kJybxLpQJfcxgZzk,7375
+simple_parsing/helpers/fields.py,sha256=UWOBtl3HPUdjHmyO2KIyzUxTenibm_t6RLmRzhsTJag,15530
+simple_parsing/helpers/flatten.py,sha256=puK9_ekbomF_8Irq_v4HrSRGNZgtUyD3UIfqaIIfHc8,6215
+simple_parsing/helpers/hparams/__init__.py,sha256=DBlmwndvBQoFC2Dpd8hNyADM0kXZW2L0w9wH_3i4Ezk,369
+simple_parsing/helpers/hparams/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/hparam.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/hyperparameters.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/hyperparameters_test.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/priors.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/priors_test.cpython-310.pyc,,
+simple_parsing/helpers/hparams/__pycache__/utils.cpython-310.pyc,,
+simple_parsing/helpers/hparams/hparam.py,sha256=4hPxjbv91RScdNc8AbtCbUpPLCV7MrOBzyJM8YMSBIo,11462
+simple_parsing/helpers/hparams/hyperparameters.py,sha256=JHYaURSvQBm7PGYzVxzPAUt784dLgYxFW52wuJRDSEU,11409
+simple_parsing/helpers/hparams/hyperparameters_test.py,sha256=GqO6lwCYnve7QS14y7Jtw6tAJfRwoPtQUtIv_wg9trw,7840
+simple_parsing/helpers/hparams/priors.py,sha256=Srk5xv_xi9LcfULaI-D8_5FWxd-S6KdTzZHon4WsCx0,11900
+simple_parsing/helpers/hparams/priors_test.py,sha256=5ST_GuhzhiTBubWGTaqwB551rzmNkCddGauzYFLH_fs,3980
+simple_parsing/helpers/hparams/utils.py,sha256=aoBoComN9ym2YvV6E5T3_6fF7OBdRUNdZQs9hUgHyMk,440
+simple_parsing/helpers/nested_partial.py,sha256=TDMiMBnvgvPGSX0arswQvjTpT__sTFrmZiI1auR9Zlo,1519
+simple_parsing/helpers/partial.py,sha256=29pPDj6tYKBa9jGrrWXFMvYJNvIF7BmJM-kkRCb1Adk,11020
+simple_parsing/helpers/serialization/__init__.py,sha256=dmg5jfqLwb0ZBs7hNs24Axw20ISuvGAjL8QrUaVmLZc,454
+simple_parsing/helpers/serialization/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/helpers/serialization/__pycache__/decoding.cpython-310.pyc,,
+simple_parsing/helpers/serialization/__pycache__/encoding.cpython-310.pyc,,
+simple_parsing/helpers/serialization/__pycache__/serializable.cpython-310.pyc,,
+simple_parsing/helpers/serialization/__pycache__/yaml_serialization.cpython-310.pyc,,
+simple_parsing/helpers/serialization/decoding.py,sha256=C8sMKKSGSlW3Mbn_zAYeSiMpIDa2_cyHuCaOfhUYNaI,17202
+simple_parsing/helpers/serialization/encoding.py,sha256=Z3StTpbG7pDGbJPtFr9Au6jT_qYGR2AXeeT08-khfsU,4416
+simple_parsing/helpers/serialization/serializable.py,sha256=76YX4OwnxhKrqsO7whAHJ8iiJmogrWFKHCuefP14oes,33575
+simple_parsing/helpers/serialization/yaml_serialization.py,sha256=JA4T2jhpaBKsvwIpK9EQHNjXU9Yt-fhYUhNckIrF-d0,1897
+simple_parsing/helpers/subgroups.py,sha256=U2YSILcPLpFQrvnjo1eUVvvYpHcAT771eYR7Lnw9Fic,11676
+simple_parsing/parsing.py,sha256=AQfzGmjHeeE25E2jEC-LAKK0bSQlN2-30vIPT44iep4,49892
+simple_parsing/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+simple_parsing/replace.py,sha256=SE_2Rnvlsn_7NC4bk5V0Ov0G6ARQWz5p28Y7VFwPb-E,8223
+simple_parsing/utils.py,sha256=y8nlq5e9nc312x78m9_WDsPJl_Zri6zIuu7RjiHGg_M,29098
+simple_parsing/wrappers/__init__.py,sha256=PCa1-VV-dZfsSIqTtXXocjZvm_bRMPHSoDKkgzhuI6A,164
+simple_parsing/wrappers/__pycache__/__init__.cpython-310.pyc,,
+simple_parsing/wrappers/__pycache__/dataclass_wrapper.cpython-310.pyc,,
+simple_parsing/wrappers/__pycache__/field_metavar.cpython-310.pyc,,
+simple_parsing/wrappers/__pycache__/field_parsing.cpython-310.pyc,,
+simple_parsing/wrappers/__pycache__/field_wrapper.cpython-310.pyc,,
+simple_parsing/wrappers/__pycache__/wrapper.cpython-310.pyc,,
+simple_parsing/wrappers/dataclass_wrapper.py,sha256=-_o9ftXmSmXa_bIYX8bcvkSgMD2ZBvdAZzb2ZjA-YkU,19078
+simple_parsing/wrappers/field_metavar.py,sha256=3zGBjT84iKmx7_VVKq_zUXJHb04Qav8rP0W4cLVPHek,2138
+simple_parsing/wrappers/field_parsing.py,sha256=i-XfrzXymPr5ohbiPYESog8iS0EgJ1WZMgpVom5gC_w,9702
+simple_parsing/wrappers/field_wrapper.py,sha256=3XHJk7q71mcoDbiPq9p9CawqUvBF_qmXMsd81Qnf61M,44606
+simple_parsing/wrappers/wrapper.py,sha256=yCF2eaeehKkJkPXDZ3tZFOrLOFz7ysKjUrlj4O9e2uc,1275
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..d73ccaae8e0eea45949b0957a5af034099b36aa4
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing-0.1.6.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: poetry-core 1.9.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/METADATA b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7e24b77c4b3bf428f435ecf347d38b57267a52c4
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/METADATA
@@ -0,0 +1,151 @@
+Metadata-Version: 2.2
+Name: timeout-decorator
+Version: 0.5.0
+Summary: Timeout decorator
+Home-page: https://github.com/pnpnpn/timeout-decorator
+Author: Patrick Ng
+Author-email: pn.appdev@gmail.com
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: home-page
+Dynamic: summary
+
+Timeout decorator
+=================
+
+|Build Status| |Pypi Status| |Coveralls Status|
+
+Installation
+------------
+
+From source code:
+
+::
+
+ python setup.py install
+
+From pypi:
+
+::
+
+ pip install timeout-decorator
+
+Usage
+-----
+
+::
+
+ import time
+ import timeout_decorator
+
+ @timeout_decorator.timeout(5)
+ def mytest():
+ print("Start")
+ for i in range(1,10):
+ time.sleep(1)
+ print("{} seconds have passed".format(i))
+
+ if __name__ == '__main__':
+ mytest()
+
+Specify an alternate exception to raise on timeout:
+
+::
+
+ import time
+ import timeout_decorator
+
+ @timeout_decorator.timeout(5, timeout_exception=StopIteration)
+ def mytest():
+ print("Start")
+ for i in range(1,10):
+ time.sleep(1)
+ print("{} seconds have passed".format(i))
+
+ if __name__ == '__main__':
+ mytest()
+
+Multithreading
+--------------
+
+By default, timeout-decorator uses signals to limit the execution time
+of the given function. This approach does not work if your function is
+not executed in the main thread (for example, if it is a worker thread of
+a web application). There is an alternative timeout strategy for this
+case, based on multiprocessing. To use it, just pass
+``use_signals=False`` to the timeout decorator function:
+
+::
+
+ import time
+ import timeout_decorator
+
+ @timeout_decorator.timeout(5, use_signals=False)
+ def mytest():
+        print("Start")
+ for i in range(1,10):
+ time.sleep(1)
+ print("{} seconds have passed".format(i))
+
+ if __name__ == '__main__':
+ mytest()
+
+.. warning::
+    Make sure that, when using the multiprocessing timeout strategy, your function does not return objects
+    which cannot be pickled; otherwise, marshalling them between the master and child processes will fail.
+
+
+Acknowledgement
+---------------
+
+Derived from
+http://www.saltycrane.com/blog/2010/04/using-python-timeout-decorator-uploading-s3/
+and https://code.google.com/p/verse-quiz/source/browse/trunk/timeout.py
+
+Contribute
+----------
+
+I would love for you to fork and send me pull request for this project.
+Please contribute.
+
+License
+-------
+
+This software is licensed under the MIT license.
+
+See the License file in the repository.
+
+.. |Build Status| image:: https://travis-ci.org/pnpnpn/timeout-decorator.svg?branch=master
+ :target: https://travis-ci.org/pnpnpn/timeout-decorator
+.. |Pypi Status| image:: https://badge.fury.io/py/timeout-decorator.svg
+ :target: https://badge.fury.io/py/timeout-decorator
+.. |Coveralls Status| image:: https://coveralls.io/repos/pnpnpn/timeout-decorator/badge.png?branch=master
+ :target: https://coveralls.io/r/pnpnpn/timeout-decorator
+
+Changelog
+=========
+
+0.3.1
+-----
+- Fixed issue where a PicklingError caused the timeout to never be reached.
+
+0.3.0
+-----
+
+- Added optional threading support via python multiprocessing (bubenkoff)
+- Switched to pytest test runner (bubenkoff)
+
+
+0.2.1
+-----
+
+- Initial public release
diff --git a/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/RECORD b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..176b54a1ce748120c0b6917ee6bc4784087269ac
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/RECORD
@@ -0,0 +1,10 @@
+timeout_decorator-0.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+timeout_decorator-0.5.0.dist-info/METADATA,sha256=SZ-eRUdq-nbGgx7iubc1Wr5jKEvUBL2Uzk0MAE7Ua-M,3737
+timeout_decorator-0.5.0.dist-info/RECORD,,
+timeout_decorator-0.5.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+timeout_decorator-0.5.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+timeout_decorator-0.5.0.dist-info/top_level.txt,sha256=BGuKi8ELnOderOS82EMu3soEBpUvC1AImcJHs42ABak,18
+timeout_decorator/__init__.py,sha256=wGXVBo1k8z9zJdpe-7_SgrLgR5EpkrZM1zD2DqiopZI,163
+timeout_decorator/__pycache__/__init__.cpython-310.pyc,,
+timeout_decorator/__pycache__/timeout_decorator.cpython-310.pyc,,
+timeout_decorator/timeout_decorator.py,sha256=7MS9r_kRWX74-W7dK6dgcMss2QxarnmRZenVNXJx4EE,6419
diff --git a/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/REQUESTED b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..170724ae12b940eec28c9fabc627734a0ceac92a
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/timeout_decorator-0.5.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+timeout_decorator
diff --git a/parrot/lib/python3.10/site-packages/trl/core.py b/parrot/lib/python3.10/site-packages/trl/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e7bb840f6e1465ba337509dd2a3aadf3608d4ab
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/trl/core.py
@@ -0,0 +1,318 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import gc
+import random
+import warnings
+from contextlib import contextmanager
+from typing import Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.utils.rnn import pad_sequence
+from transformers import TopKLogitsWarper, TopPLogitsWarper, is_torch_npu_available, is_torch_xpu_available
+
+
+# `Mapping` lives in `collections.abc` on all supported Pythons; the old
+# Python 2 fallback made this a try/except, but both branches had become
+# identical, so a plain import suffices.
+from collections.abc import Mapping
+
+
+WANDB_PADDING = -1
+
+
+def top_k_top_p_filtering(
+ logits: torch.FloatTensor,
+ top_k: int = 0,
+ top_p: float = 1.0,
+ filter_value: float = -float("Inf"),
+ min_tokens_to_keep: int = 1,
+) -> torch.FloatTensor:
+ """
+ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.
+
+ Args:
+ logits: logits distribution shape (batch size, vocabulary size)
+ top_k (`int`, *optional*, defaults to 0):
+ If > 0, only keep the top k tokens with highest probability (top-k filtering)
+ top_p (`float`, *optional*, defaults to 1.0):
+ If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
+ filtering is described in Holtzman et al. (https://huggingface.co/papers/1904.09751)
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
+            Minimum number of tokens we keep per batch example in the output.
+
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
+ """
+
+ if top_k > 0:
+ logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
+ None, logits
+ )
+
+ if 0 <= top_p <= 1.0:
+ logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
+ None, logits
+ )
+
+ return logits
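+
+
+# Editorial sketch, not part of upstream TRL: with top_k=2, everything but
+# the two highest logits is pushed to -inf, and the default top_p=1.0 leaves
+# the rest untouched:
+#     >>> logits = torch.tensor([[1.0, 3.0, 2.0, 0.5]])
+#     >>> top_k_top_p_filtering(logits, top_k=2)
+#     tensor([[-inf, 3., 2., -inf]])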
+
+
+def flatten_dict(nested: Dict, sep: str = "/") -> Dict:
+ """Flatten dictionary and concatenate nested keys with separator."""
+
+ def recurse(nest: Dict, prefix: str, into: Dict) -> None:
+ for k, v in nest.items():
+ if sep in k:
+ raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'")
+ if isinstance(v, Mapping):
+ recurse(v, prefix + k + sep, into)
+ else:
+ into[prefix + k] = v
+
+ flat = {}
+ recurse(nested, "", flat)
+ return flat
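+
+
+# Editorial example, not part of upstream TRL: nested stats become
+# slash-separated keys suitable for loggers such as wandb:
+#     >>> flatten_dict({"ppo": {"lr": 1e-05, "kl": {"coef": 0.2}}})
+#     {'ppo/lr': 1e-05, 'ppo/kl/coef': 0.2}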
+
+
+def convert_to_scalar(stats: Dict) -> Dict:
+ """
+    Convert single-element tensors and arrays in a flattened stats dict to plain scalars.
+ """
+ tensorboard_stats = {}
+ for k, v in stats.items():
+ # for tensorboard compatibility - arrays and tensors are ignored with tensorboard
+ # therefore we convert single element tensors to scalars
+ if (isinstance(v, torch.Tensor) or isinstance(v, np.ndarray)) and (
+ len(v.shape) == 0 or (len(v.shape) == 1 and v.shape[0] == 1)
+ ):
+ v = v.item()
+ tensorboard_stats[k] = v
+ return tensorboard_stats
+
+
+def stack_dicts(stats_dicts: List[Dict]) -> Dict:
+ """Stack the values of a dict."""
+ results = dict()
+ for k in stats_dicts[0]:
+ stats_list = [torch.flatten(d[k]) for d in stats_dicts]
+ results[k] = pad_sequence(stats_list, batch_first=True, padding_value=WANDB_PADDING)
+ return results
+
+
+def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool = True) -> torch.Tensor:
+ """
+ See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
+ """
+ logp = F.log_softmax(logits, dim=2)
+
+ if not gather:
+ return logp
+ logpy = torch.gather(logp, 2, labels.unsqueeze(2)).squeeze(-1)
+ return logpy
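+
+
+# Editorial note, not part of upstream TRL: `logits` is expected to have
+# shape (batch, seq_len, vocab) and `labels` shape (batch, seq_len); with
+# gather=True the result holds the log-probability of each label token and
+# has shape (batch, seq_len).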
+
+
+def whiten(values: torch.Tensor, shift_mean: bool = True) -> torch.Tensor:
+ """Whiten values."""
+ mean, var = torch.mean(values), torch.var(values)
+ whitened = (values - mean) * torch.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[int] = None) -> torch.Tensor:
+    """Compute the mean of a tensor over the positions selected by the mask."""
+ if axis is not None:
+ return (values * mask).sum(axis=axis) / mask.sum(axis=axis)
+ else:
+ return (values * mask).sum() / mask.sum()
+
+
+def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool = True) -> torch.Tensor:
+ """Compute variance of tensor with masked values."""
+ mean = masked_mean(values, mask)
+ centered_values = values - mean
+ variance = masked_mean(centered_values**2, mask)
+ if unbiased:
+ mask_sum = mask.sum()
+ if mask_sum == 0:
+ raise ValueError(
+ "The sum of the mask is zero, which can happen when `mini_batch_size=1`;"
+ "try increase the `mini_batch_size` or `gradient_accumulation_steps`"
+ )
+ # note that if mask_sum == 1, then there is a division by zero issue
+ # to avoid it you just need to use a larger minibatch_size
+ bessel_correction = mask_sum / (mask_sum - 1)
+ variance = variance * bessel_correction
+ return variance
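+
+
+# Editorial example, not part of upstream TRL: with three positions selected
+# by the mask, the large unmasked outlier is ignored entirely:
+#     >>> v = torch.tensor([1.0, 2.0, 3.0, 100.0])
+#     >>> m = torch.tensor([1.0, 1.0, 1.0, 0.0])
+#     >>> masked_mean(v, m)
+#     tensor(2.)
+#     >>> masked_var(v, m)  # unbiased: biased var 2/3 times Bessel factor 3/2
+#     tensor(1.)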
+
+
+def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True) -> torch.Tensor:
+ """Whiten values with masked values."""
+ mean, var = masked_mean(values, mask), masked_var(values, mask)
+ whitened = (values - mean) * torch.rsqrt(var + 1e-8)
+ if not shift_mean:
+ whitened += mean
+ return whitened
+
+
+def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor:
+ """
+ Tensor extension to torch.clamp
+ https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
+ """
+ clipped = torch.max(torch.min(x, tensor_max), tensor_min)
+ return clipped
+
+
+def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor:
+ """Calculate entropy from logits."""
+ pd = torch.nn.functional.softmax(logits, dim=-1)
+ entropy = torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1)
+ return entropy
+
+
+def stats_to_np(stats_dict: Dict) -> Dict:
+ """Cast all torch.tensors in dict to numpy arrays."""
+ new_dict = dict()
+ for k, v in stats_dict.items():
+ if isinstance(v, torch.Tensor):
+ new_dict[k] = v.detach().cpu()
+ if new_dict[k].dtype == torch.bfloat16:
+ new_dict[k] = new_dict[k].float()
+ new_dict[k] = new_dict[k].numpy()
+ else:
+ new_dict[k] = v
+ if np.isscalar(new_dict[k]):
+ new_dict[k] = float(new_dict[k])
+ return new_dict
+
+
+def respond_to_batch(
+ model: nn.Module, queries: List[torch.LongTensor], txt_len: int = 20, top_k: int = 0, top_p: float = 1.0
+) -> torch.LongTensor:
+ """Sample text from language model."""
+ input_ids = queries
+ for _i in range(txt_len):
+ # Get Logits
+ outputs = model(input_ids)
+ next_token_logits = outputs[0][:, -1, :]
+ next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
+ # Sample
+ probs = F.softmax(next_token_logits, dim=-1)
+ next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
+ input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1)
+ return input_ids[:, -txt_len:]
+
+
+def set_seed(seed: int) -> None:
+ """
+ Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`.
+
+ Args:
+ seed (`int`): The seed to set.
+ """
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ if is_torch_xpu_available():
+ torch.xpu.manual_seed_all(seed)
+ elif is_torch_npu_available():
+ torch.npu.manual_seed_all(seed)
+ else:
+ torch.cuda.manual_seed_all(seed)
+
+
+class LengthSampler:
+ """
+    Samples a length uniformly at random from ``range(min_value, max_value)``.
+ """
+
+ def __init__(self, min_value: int, max_value: int):
+ self.values = list(range(min_value, max_value))
+
+ def __call__(self) -> int:
+ return np.random.choice(self.values)
+
+
+class PPODecorators:
+ optimize_device_cache = False
+
+ @classmethod
+ @contextmanager
+ def empty_device_cache(cls):
+ yield
+ if cls.optimize_device_cache:
+ if is_torch_xpu_available():
+ gc.collect()
+ torch.xpu.empty_cache()
+ gc.collect()
+ elif is_torch_npu_available():
+ gc.collect()
+ torch.npu.empty_cache()
+ gc.collect()
+ elif torch.cuda.is_available():
+ gc.collect()
+ torch.cuda.empty_cache()
+ gc.collect()
+
+
+def randn_tensor(
+ shape: Union[Tuple, List],
+ generator: Optional[Union[List[torch.Generator], torch.Generator]] = None,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ layout: Optional[torch.layout] = None,
+) -> torch.Tensor:
+ """A helper function to create random tensors on the desired `device` with the desired `dtype`. When
+ passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor
+ is always created on the CPU.
+ """
+    # the device the tensor is sampled on; may fall back to "cpu" for CPU generators
+ rand_device = device
+ batch_size = shape[0]
+
+ layout = layout or torch.strided
+ device = device or torch.device("cpu")
+
+ if generator is not None:
+ gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
+ if gen_device_type != device.type and gen_device_type == "cpu":
+ rand_device = "cpu"
+ if device.type != "mps":
+ warnings.warn(
+ f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
+ f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
+ f" slighly speed up this function by passing a generator that was created on the {device} device."
+ )
+ elif gen_device_type != device.type and gen_device_type == "cuda":
+ raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")
+
+ # make sure generator list of length 1 is treated like a non-list
+ if isinstance(generator, list) and len(generator) == 1:
+ generator = generator[0]
+
+ if isinstance(generator, list):
+ shape = (1,) + shape[1:]
+ latents = [
+ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
+ for i in range(batch_size)
+ ]
+ latents = torch.cat(latents, dim=0).to(device)
+ else:
+ latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)
+
+ return latents
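+
+# Example (illustrative sketch): per-sample reproducibility via a list of CPU
+# generators, one per batch element; the result is moved to the target device:
+#
+#     gens = [torch.Generator().manual_seed(i) for i in range(4)]
+#     noise = randn_tensor((4, 3, 8, 8), generator=gens, device=torch.device("cpu"))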
diff --git a/parrot/lib/python3.10/site-packages/trl/env_utils.py b/parrot/lib/python3.10/site-packages/trl/env_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..64e98199e0268bdc479053dafc0ff93e26db1e48
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/trl/env_utils.py
@@ -0,0 +1,34 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Function `strtobool` copied and adapted from `distutils` (deprecated
+# since Python 3.10).
+# Reference: https://github.com/python/cpython/blob/48f9d3e3faec5faaa4f7c9849fecd27eae4da213/Lib/distutils/util.py#L308-L321
+
+
+def strtobool(val: str) -> bool:
+ """Convert a string representation of truth to True or False booleans.
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'.
+
+ Raises:
+ ValueError: if 'val' is anything else.
+ """
+ val = val.lower()
+ if val in ("y", "yes", "t", "true", "on", "1"):
+ return True
+ if val in ("n", "no", "f", "false", "off", "0"):
+ return False
+ raise ValueError(f"Invalid truth value: {val!r} is not a recognized boolean string.")
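+
+# Example (illustrative sketch): parsing an environment flag with a default
+# when the variable is unset (`TRL_DEBUG` is a hypothetical variable name):
+#
+#     import os
+#     debug = strtobool(os.environ.get("TRL_DEBUG", "0"))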
diff --git a/parrot/lib/python3.10/site-packages/trl/extras/__init__.py b/parrot/lib/python3.10/site-packages/trl/extras/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef6b216986a102532e67aefb3779669740ec8e12
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/trl/extras/__init__.py
@@ -0,0 +1,30 @@
+# flake8: noqa
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ..import_utils import _LazyModule
+
+
+_import_structure = {
+ "best_of_n_sampler": ["BestOfNSampler"],
+}
+
+if TYPE_CHECKING:
+ from .best_of_n_sampler import BestOfNSampler
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
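+
+# Example (illustrative sketch): with the `_LazyModule` indirection above,
+# importing the name defers loading `best_of_n_sampler` and its dependencies
+# until the attribute is actually resolved:
+#
+#     from trl.extras import BestOfNSampler   # submodule imported only now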
diff --git a/parrot/lib/python3.10/site-packages/trl/import_utils.py b/parrot/lib/python3.10/site-packages/trl/import_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..988c1cdb147edc3eb16cf4f1920ce48be0e42d35
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/trl/import_utils.py
@@ -0,0 +1,154 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+import os
+import sys
+from itertools import chain
+from types import ModuleType
+from typing import Any
+
+from packaging.version import parse as parse_version
+from transformers.utils.import_utils import _is_package_available
+
+
+_is_python_greater_3_8 = sys.version_info >= (3, 8)
+
+# Same helper pattern as in transformers.utils.import_utils
+_diffusers_available = _is_package_available("diffusers")
+_unsloth_available = _is_package_available("unsloth")
+_rich_available = _is_package_available("rich")
+_liger_kernel_available = _is_package_available("liger_kernel")
+_llmblender_available = _is_package_available("llm_blender")
+
+
+def is_diffusers_available() -> bool:
+ return _diffusers_available
+
+
+def is_unsloth_available() -> bool:
+ return _unsloth_available
+
+
+def is_rich_available() -> bool:
+ return _rich_available
+
+
+def is_liger_kernel_available() -> bool: # replace with transformers.utils.import_utils.is_liger_kernel_available once transformers >= v4.45 is required
+ return _liger_kernel_available
+
+
+def is_llmblender_available() -> bool:
+ return _llmblender_available
+
+
+def is_accelerate_greater_20_0() -> bool:
+ if _is_python_greater_3_8:
+ from importlib.metadata import version
+
+ accelerate_version = version("accelerate")
+ else:
+ import pkg_resources
+
+ accelerate_version = pkg_resources.get_distribution("accelerate").version
+ # compare parsed versions; raw string comparison misorders e.g. "0.100.0" vs "0.20.0"
+ return parse_version(accelerate_version) >= parse_version("0.20.0")
+
+
+def is_transformers_greater_than(current_version: str) -> bool:
+ if _is_python_greater_3_8:
+ from importlib.metadata import version
+
+ _transformers_version = version("transformers")
+ else:
+ import pkg_resources
+
+ _transformers_version = pkg_resources.get_distribution("transformers").version
+ return parse_version(_transformers_version) > parse_version(current_version)
+
+
+def is_torch_greater_2_0() -> bool:
+ if _is_python_greater_3_8:
+ from importlib.metadata import version
+
+ torch_version = version("torch")
+ else:
+ import pkg_resources
+
+ torch_version = pkg_resources.get_distribution("torch").version
+ return parse_version(torch_version) >= parse_version("2.0")
+
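+# Example (illustrative sketch): why parsed comparison matters. As raw strings,
+# "0.100.0" sorts before "0.20.0", while packaging orders them numerically:
+#
+#     parse_version("0.100.0") > parse_version("0.20.0")   # -> True
+#     "0.100.0" > "0.20.0"                                 # -> False
+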
+
+class _LazyModule(ModuleType):
+ """
+ Module class that surfaces all objects but only performs associated imports when the objects are requested.
+ """
+
+ # Very heavily inspired by optuna.integration._IntegrationModule
+ # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
+ def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
+ super().__init__(name)
+ self._modules = set(import_structure.keys())
+ self._class_to_module = {}
+ for key, values in import_structure.items():
+ for value in values:
+ self._class_to_module[value] = key
+ # Needed for autocompletion in an IDE
+ self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
+ self.__file__ = module_file
+ self.__spec__ = module_spec
+ self.__path__ = [os.path.dirname(module_file)]
+ self._objects = {} if extra_objects is None else extra_objects
+ self._name = name
+ self._import_structure = import_structure
+
+ # Needed for autocompletion in an IDE
+ def __dir__(self):
+ result = super().__dir__()
+ # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
+ # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
+ for attr in self.__all__:
+ if attr not in result:
+ result.append(attr)
+ return result
+
+ def __getattr__(self, name: str) -> Any:
+ if name in self._objects:
+ return self._objects[name]
+ if name in self._modules:
+ value = self._get_module(name)
+ elif name in self._class_to_module.keys():
+ module = self._get_module(self._class_to_module[name])
+ value = getattr(module, name)
+ else:
+ raise AttributeError(f"module {self.__name__} has no attribute {name}")
+
+ setattr(self, name, value)
+ return value
+
+ def _get_module(self, module_name: str):
+ try:
+ return importlib.import_module("." + module_name, self.__name__)
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
+ f" traceback):\n{e}"
+ ) from e
+
+ def __reduce__(self):
+ return (self.__class__, (self._name, self.__file__, self._import_structure))
+
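+# Example (illustrative sketch): the first attribute access performs the real
+# import via `_get_module`, then caches the result on the module with `setattr`:
+#
+#     import trl.extras as extras    # no submodule imported yet
+#     extras.BestOfNSampler          # imports trl.extras.best_of_n_sampler now
+#     extras.BestOfNSampler          # served from the cached attribute
+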
+
+class OptionalDependencyNotAvailable(BaseException):
+ """Internally used error class for signalling an optional dependency was not found."""