Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- vllm/lib/python3.10/site-packages/annotated_types/__init__.py +432 -0
- vllm/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/annotated_types/py.typed +0 -0
- vllm/lib/python3.10/site-packages/annotated_types/test_cases.py +151 -0
- vllm/lib/python3.10/site-packages/av-14.1.0.dist-info/METADATA +133 -0
- vllm/lib/python3.10/site-packages/gitdb/__init__.py +16 -0
- vllm/lib/python3.10/site-packages/gitdb/base.py +315 -0
- vllm/lib/python3.10/site-packages/gitdb/const.py +4 -0
- vllm/lib/python3.10/site-packages/gitdb/db/__init__.py +11 -0
- vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/base.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/db/base.py +278 -0
- vllm/lib/python3.10/site-packages/gitdb/db/git.py +85 -0
- vllm/lib/python3.10/site-packages/gitdb/db/mem.py +110 -0
- vllm/lib/python3.10/site-packages/gitdb/db/pack.py +206 -0
- vllm/lib/python3.10/site-packages/gitdb/db/ref.py +82 -0
- vllm/lib/python3.10/site-packages/gitdb/exc.py +57 -0
- vllm/lib/python3.10/site-packages/gitdb/fun.py +704 -0
- vllm/lib/python3.10/site-packages/gitdb/pack.py +1031 -0
- vllm/lib/python3.10/site-packages/gitdb/stream.py +730 -0
- vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/gitdb/test/lib.py +192 -0
- vllm/lib/python3.10/site-packages/gitdb/test/test_base.py +105 -0
- vllm/lib/python3.10/site-packages/gitdb/test/test_example.py +43 -0
- vllm/lib/python3.10/site-packages/gitdb/test/test_stream.py +164 -0
- vllm/lib/python3.10/site-packages/gitdb/test/test_util.py +100 -0
- vllm/lib/python3.10/site-packages/gitdb/typ.py +10 -0
- vllm/lib/python3.10/site-packages/gitdb/util.py +398 -0
- vllm/lib/python3.10/site-packages/gitdb/utils/encoding.py +18 -0
- vllm/lib/python3.10/site-packages/tqdm/__main__.py +3 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc +0 -0
vllm/lib/python3.10/site-packages/annotated_types/__init__.py
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import math
import sys
import types
from dataclasses import dataclass
from datetime import tzinfo
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union

# Protocol / runtime_checkable landed in typing in 3.8; use the
# typing_extensions backport on older interpreters.
if sys.version_info < (3, 8):
    from typing_extensions import Protocol, runtime_checkable
else:
    from typing import Protocol, runtime_checkable

# Annotated / Literal landed in typing in 3.9.
if sys.version_info < (3, 9):
    from typing_extensions import Annotated, Literal
else:
    from typing import Annotated, Literal

if sys.version_info < (3, 10):
    # types.EllipsisType and the dataclass kw_only/slots options are 3.10+.
    # On older interpreters fall back to type(Ellipsis) and empty option
    # dicts so `@dataclass(**KW_ONLY, **SLOTS)` below degrades gracefully
    # to a plain dataclass.
    EllipsisType = type(Ellipsis)
    KW_ONLY = {}
    SLOTS = {}
else:
    from types import EllipsisType

    KW_ONLY = {"kw_only": True}
    SLOTS = {"slots": True}


# NOTE(review): 'Unit', 'IsDigit', and 'IsAscii' are defined below but are
# absent from __all__ — confirm whether that omission is intentional.
__all__ = (
    'BaseMetadata',
    'GroupedMetadata',
    'Gt',
    'Ge',
    'Lt',
    'Le',
    'Interval',
    'MultipleOf',
    'MinLen',
    'MaxLen',
    'Len',
    'Timezone',
    'Predicate',
    'LowerCase',
    'UpperCase',
    'IsDigits',
    'IsFinite',
    'IsNotFinite',
    'IsNan',
    'IsNotNan',
    'IsInfinite',
    'IsNotInfinite',
    'doc',
    'DocInfo',
    '__version__',
)

__version__ = '0.7.0'


T = TypeVar('T')


# arguments that start with __ are considered
# positional only
# see https://peps.python.org/pep-0484/#positional-only-arguments
class SupportsGt(Protocol):
    """Structural type for objects supporting the ``>`` operator."""

    def __gt__(self: T, __other: T) -> bool:
        ...


class SupportsGe(Protocol):
    """Structural type for objects supporting the ``>=`` operator."""

    def __ge__(self: T, __other: T) -> bool:
        ...


class SupportsLt(Protocol):
    """Structural type for objects supporting the ``<`` operator."""

    def __lt__(self: T, __other: T) -> bool:
        ...


class SupportsLe(Protocol):
    """Structural type for objects supporting the ``<=`` operator."""

    def __le__(self: T, __other: T) -> bool:
        ...


class SupportsMod(Protocol):
    """Structural type for objects supporting the ``%`` operator."""

    def __mod__(self: T, __other: T) -> T:
        ...


class SupportsDiv(Protocol):
    """Structural type for objects supporting the ``__div__`` method."""

    def __div__(self: T, __other: T) -> T:
        ...
class BaseMetadata:
    """Base class for all metadata.

    This exists mainly so that implementers
    can do `isinstance(..., BaseMetadata)` while traversing field annotations.
    """

    # No instance state here; concrete subclasses are frozen dataclasses
    # that declare their own slots (via SLOTS on 3.10+).
    __slots__ = ()
@dataclass(frozen=True, **SLOTS)
class Gt(BaseMetadata):
    """Gt(gt=x) implies that the value must be greater than x.

    It can be used with any type that supports the ``>`` operator,
    including numbers, dates and times, strings, sets, and so on.
    """

    # Exclusive lower bound.
    gt: SupportsGt


@dataclass(frozen=True, **SLOTS)
class Ge(BaseMetadata):
    """Ge(ge=x) implies that the value must be greater than or equal to x.

    It can be used with any type that supports the ``>=`` operator,
    including numbers, dates and times, strings, sets, and so on.
    """

    # Inclusive lower bound.
    ge: SupportsGe


@dataclass(frozen=True, **SLOTS)
class Lt(BaseMetadata):
    """Lt(lt=x) implies that the value must be less than x.

    It can be used with any type that supports the ``<`` operator,
    including numbers, dates and times, strings, sets, and so on.
    """

    # Exclusive upper bound.
    lt: SupportsLt


@dataclass(frozen=True, **SLOTS)
class Le(BaseMetadata):
    """Le(le=x) implies that the value must be less than or equal to x.

    It can be used with any type that supports the ``<=`` operator,
    including numbers, dates and times, strings, sets, and so on.
    """

    # Inclusive upper bound.
    le: SupportsLe
@runtime_checkable
class GroupedMetadata(Protocol):
    """A grouping of multiple objects, like typing.Unpack.

    `GroupedMetadata` on its own is not metadata and has no meaning.
    All of the constraints and metadata should be fully expressible
    in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.

    Concrete implementations should override `GroupedMetadata.__iter__()`
    to add their own metadata.
    For example:

    >>> @dataclass
    >>> class Field(GroupedMetadata):
    >>>     gt: float | None = None
    >>>     description: str | None = None
    ...
    >>>     def __iter__(self) -> Iterable[object]:
    >>>         if self.gt is not None:
    >>>             yield Gt(self.gt)
    >>>         if self.description is not None:
    >>>             yield Description(self.gt)

    Also see the implementation of `Interval` below for an example.

    Parsers should recognize this and unpack it so that it can be used
    both with and without unpacking:

    - `Annotated[int, Field(...)]` (parser must unpack Field)
    - `Annotated[int, *Field(...)]` (PEP-646)
    """  # noqa: trailing-whitespace

    @property
    def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
        # Marker property so consumers can detect grouped metadata without
        # relying on (slower) runtime_checkable isinstance checks.
        return True

    def __iter__(self) -> Iterator[object]:
        ...

    if not TYPE_CHECKING:
        __slots__ = ()  # allow subclasses to use slots

        def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
            # Basic ABC like functionality without the complexity of an ABC
            super().__init_subclass__(*args, **kwargs)
            if cls.__iter__ is GroupedMetadata.__iter__:
                raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")

        def __iter__(self) -> Iterator[object]:  # noqa: F811
            raise NotImplementedError  # more helpful than "None has no attribute..." type errors
@dataclass(frozen=True, **KW_ONLY, **SLOTS)
class Interval(GroupedMetadata):
    """Interval can express inclusive or exclusive bounds with a single object.

    It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
    are interpreted the same way as the single-bound constraints.
    """

    gt: Union[SupportsGt, None] = None
    ge: Union[SupportsGe, None] = None
    lt: Union[SupportsLt, None] = None
    le: Union[SupportsLe, None] = None

    def __iter__(self) -> Iterator[BaseMetadata]:
        """Unpack an Interval into zero or more single-bounds."""
        # Emit one single-bound constraint per populated field,
        # always in gt, ge, lt, le order.
        for constraint_cls, bound in (
            (Gt, self.gt),
            (Ge, self.ge),
            (Lt, self.lt),
            (Le, self.le),
        ):
            if bound is not None:
                yield constraint_cls(bound)
@dataclass(frozen=True, **SLOTS)
class MultipleOf(BaseMetadata):
    """MultipleOf(multiple_of=x) might be interpreted in two ways:

    1. Python semantics, implying ``value % multiple_of == 0``, or
    2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``

    We encourage users to be aware of these two common interpretations,
    and libraries to carefully document which they implement.
    """

    # The divisor; interpretation (Python vs JSONschema) is left to consumers.
    multiple_of: Union[SupportsDiv, SupportsMod]
@dataclass(frozen=True, **SLOTS)
class MinLen(BaseMetadata):
    """
    MinLen() implies minimum inclusive length,
    e.g. ``len(value) >= min_length``.
    """

    # Inclusive lower length bound; must be non-negative.
    min_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class MaxLen(BaseMetadata):
    """
    MaxLen() implies maximum inclusive length,
    e.g. ``len(value) <= max_length``.
    """

    # Inclusive upper length bound; must be non-negative.
    max_length: Annotated[int, Ge(0)]
@dataclass(frozen=True, **SLOTS)
class Len(GroupedMetadata):
    """
    Len() implies that ``min_length <= len(value) <= max_length``.

    Upper bound may be omitted or ``None`` to indicate no upper length bound.
    """

    # Inclusive lower bound; 0 means "no effective lower bound".
    min_length: Annotated[int, Ge(0)] = 0
    # Inclusive upper bound; None means unbounded above.
    max_length: Optional[Annotated[int, Ge(0)]] = None

    def __iter__(self) -> Iterator[BaseMetadata]:
        """Unpack a Len into zero or more single-bounds."""
        # A zero min_length is a no-op constraint, so it is not emitted.
        if self.min_length > 0:
            yield MinLen(self.min_length)
        if self.max_length is not None:
            yield MaxLen(self.max_length)
@dataclass(frozen=True, **SLOTS)
class Timezone(BaseMetadata):
    """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).

    ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
    ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
    tz-aware but any timezone is allowed.

    You may also pass a specific timezone string or tzinfo object such as
    ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
    you only allow a specific timezone, though we note that this is often
    a symptom of poor design.
    """

    # None => naive required; Ellipsis => any aware tz; str/tzinfo => that tz only.
    tz: Union[str, tzinfo, EllipsisType, None]
@dataclass(frozen=True, **SLOTS)
class Unit(BaseMetadata):
    """Indicates that the value is a physical quantity with the specified unit.

    It is intended for usage with numeric types, where the value represents the
    magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
    or ``speed: Annotated[float, Unit('m/s')]``.

    Interpretation of the unit string is left to the discretion of the consumer.
    It is suggested to follow conventions established by python libraries that work
    with physical quantities, such as

    - ``pint`` : <https://pint.readthedocs.io/en/stable/>
    - ``astropy.units``: <https://docs.astropy.org/en/stable/units/>

    For indicating a quantity with a certain dimensionality but without a specific unit
    it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
    Note, however, ``annotated_types`` itself makes no use of the unit string.
    """

    # Free-form unit label; not parsed or validated by this package.
    unit: str
@dataclass(frozen=True, **SLOTS)
class Predicate(BaseMetadata):
    """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.

    Users should prefer statically inspectable metadata, but if you need the full
    power and flexibility of arbitrary runtime predicates... here it is.

    We provide a few predefined predicates for common string constraints:
    ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
    ``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
    can be given special handling, and avoid indirection like ``lambda s: s.lower()``.

    Some libraries might have special logic to handle certain predicates, e.g. by
    checking for `str.isdigit` and using its presence to both call custom logic to
    enforce digit-only strings, and customise some generated external schema.

    We do not specify what behaviour should be expected for predicates that raise
    an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
    skip invalid constraints, or statically raise an error; or it might try calling it
    and then propagate or discard the resulting exception.
    """

    # The predicate; valid values are those for which func(value) is truthy.
    func: Callable[[Any], bool]

    def __repr__(self) -> str:
        # Anonymous functions have no useful __name__; show the full repr.
        if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
            return f"{self.__class__.__name__}({self.func!r})"
        # Bound (or builtin bound) methods: qualify with the owner's name
        # when the owner exposes one (e.g. "str.isdigit").
        if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
            namespace := getattr(self.func.__self__, "__name__", None)
        ):
            return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
        if isinstance(self.func, type(str.isascii)):  # method descriptor
            return f"{self.__class__.__name__}({self.func.__qualname__})"
        # Plain named function: its __name__ is descriptive enough.
        return f"{self.__class__.__name__}({self.func.__name__})"
@dataclass
class Not:
    """Callable wrapper that logically negates the wrapped predicate."""

    func: Callable[[Any], bool]

    def __call__(self, __v: Any) -> bool:
        # Invert the wrapped predicate's truthiness for the given value.
        verdict = self.func(__v)
        return not verdict
# Convenience aliases: Annotated forms pairing a constrained TypeVar with a
# common stdlib predicate. The bare strings after each assignment are
# documentation conventions picked up by some tools; they are not runtime
# docstrings.
_StrType = TypeVar("_StrType", bound=str)

LowerCase = Annotated[_StrType, Predicate(str.islower)]
"""
Return True if the string is a lowercase string, False otherwise.

A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
"""  # noqa: E501
UpperCase = Annotated[_StrType, Predicate(str.isupper)]
"""
Return True if the string is an uppercase string, False otherwise.

A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
"""  # noqa: E501
IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
IsDigits = IsDigit  # type: ignore  # plural for backwards compatibility, see #63
"""
Return True if the string is a digit string, False otherwise.

A string is a digit string if all characters in the string are digits and there is at least one character in the string.
"""  # noqa: E501
IsAscii = Annotated[_StrType, Predicate(str.isascii)]
"""
Return True if all characters in the string are ASCII, False otherwise.

ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
"""

_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
"""Return True if x is one of infinity or NaN, and False otherwise"""
IsNan = Annotated[_NumericType, Predicate(math.isnan)]
"""Return True if x is a NaN (not a number), and False otherwise."""
IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
"""Return True if x is anything but NaN (not a number), and False otherwise."""
IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
"""Return True if x is a positive or negative infinity, and False otherwise."""
IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
"""Return True if x is neither a positive or negative infinity, and False otherwise."""
try:
    from typing_extensions import DocInfo, doc  # type: ignore [attr-defined]
except ImportError:
    # typing_extensions is unavailable (or too old): provide a minimal
    # stand-in DocInfo / doc() with the same interface.

    @dataclass(frozen=True, **SLOTS)
    class DocInfo:  # type: ignore [no-redef]
        """
        The return value of doc(), mainly to be used by tools that want to extract the
        Annotated documentation at runtime.
        """

        documentation: str
        """The documentation string passed to doc()."""

    def doc(
        documentation: str,
    ) -> DocInfo:
        """
        Add documentation to a type annotation inside of Annotated.

        For example:

        >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
        """
        return DocInfo(documentation)
vllm/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (14.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc
ADDED
|
Binary file (5.62 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/annotated_types/py.typed
ADDED
|
File without changes
|
vllm/lib/python3.10/site-packages/annotated_types/test_cases.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import math
import sys
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple

# Annotated landed in typing in 3.9; use the typing_extensions backport before that.
if sys.version_info < (3, 9):
    from typing_extensions import Annotated
else:
    from typing import Annotated

import annotated_types as at
class Case(NamedTuple):
    """
    A test case for `annotated_types`.
    """

    # The Annotated[...] form under test.
    annotation: Any
    # Example values expected to satisfy the annotation's constraints.
    valid_cases: Iterable[Any]
    # Example values expected to violate the annotation's constraints.
    invalid_cases: Iterable[Any]
def cases() -> Iterable[Case]:
    """Yield a Case (annotation, valid examples, invalid examples) for each
    constraint type exported by annotated_types."""
    # Gt, Ge, Lt, Le
    yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
    )
    # NOTE(review): the next two cases annotate `datetime` but supply
    # date/Decimal examples — presumably intentional (bounds only need to be
    # comparable with the values); confirm.
    yield Case(
        Annotated[datetime, at.Gt(date(2000, 1, 1))],
        [date(2000, 1, 2), date(2000, 1, 3)],
        [date(2000, 1, 1), date(1999, 12, 31)],
    )
    yield Case(
        Annotated[datetime, at.Gt(Decimal('1.123'))],
        [Decimal('1.1231'), Decimal('123')],
        [Decimal('1.123'), Decimal('0')],
    )

    yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
    yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(1998, 1, 1), datetime(1999, 12, 31)],
    )

    yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
    yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
        [datetime(1999, 12, 31), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
    yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Le(datetime(2000, 1, 1))],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    # Interval
    yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
    yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
    yield Case(
        Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(2000, 1, 4)],
    )

    yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
    yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))

    # lengths

    yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
    yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))

    yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
    yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))

    yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10))
    yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234'))

    yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}])
    yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4}))
    yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4)))

    # Timezone

    yield Case(
        Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(timezone.utc)],
        [datetime(2000, 1, 1, tzinfo=timezone.utc)],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )
    yield Case(
        Annotated[datetime, at.Timezone('Europe/London')],
        [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )

    # Quantity

    yield Case(Annotated[float, at.Unit(unit='m')], (5, 4.2), ('5m', '4.2m'))

    # predicate types

    yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom'])
    yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC'])
    yield Case(at.IsDigit[str], ['123'], ['', 'ab', 'a1b2'])
    yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀'])

    yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5])

    yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf])
    yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23])
    yield Case(at.IsNan[float], [math.nan], [1.23, math.inf])
    yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan])
    yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23])
    yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf])

    # check stacked predicates
    yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan])

    # doc
    yield Case(Annotated[int, at.doc("A number")], [1, 2], [])

    # custom GroupedMetadata
    class MyCustomGroupedMetadata(at.GroupedMetadata):
        def __iter__(self) -> Iterator[at.Predicate]:
            yield at.Predicate(lambda x: float(x).is_integer())

    yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5])
vllm/lib/python3.10/site-packages/av-14.1.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.2
|
| 2 |
+
Name: av
|
| 3 |
+
Version: 14.1.0
|
| 4 |
+
Summary: Pythonic bindings for FFmpeg's libraries.
|
| 5 |
+
Home-page: https://github.com/PyAV-Org/PyAV
|
| 6 |
+
Author: Mike Boers
|
| 7 |
+
Author-email: pyav@mikeboers.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Project-URL: Bug Reports, https://github.com/PyAV-Org/PyAV/discussions/new?category=4-bugs
|
| 10 |
+
Project-URL: Documentation, https://pyav.basswood-io.com
|
| 11 |
+
Project-URL: Download, https://pypi.org/project/av
|
| 12 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 13 |
+
Classifier: Intended Audience :: Developers
|
| 14 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 15 |
+
Classifier: Natural Language :: English
|
| 16 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
| 17 |
+
Classifier: Operating System :: POSIX
|
| 18 |
+
Classifier: Operating System :: Unix
|
| 19 |
+
Classifier: Operating System :: Microsoft :: Windows
|
| 20 |
+
Classifier: Programming Language :: Cython
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 25 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 26 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 27 |
+
Classifier: Topic :: Multimedia :: Sound/Audio
|
| 28 |
+
Classifier: Topic :: Multimedia :: Sound/Audio :: Conversion
|
| 29 |
+
Classifier: Topic :: Multimedia :: Video
|
| 30 |
+
Classifier: Topic :: Multimedia :: Video :: Conversion
|
| 31 |
+
Requires-Python: >=3.9
|
| 32 |
+
Description-Content-Type: text/markdown
|
| 33 |
+
License-File: LICENSE.txt
|
| 34 |
+
License-File: AUTHORS.py
|
| 35 |
+
License-File: AUTHORS.rst
|
| 36 |
+
Dynamic: author
|
| 37 |
+
Dynamic: author-email
|
| 38 |
+
Dynamic: classifier
|
| 39 |
+
Dynamic: description
|
| 40 |
+
Dynamic: description-content-type
|
| 41 |
+
Dynamic: home-page
|
| 42 |
+
Dynamic: license
|
| 43 |
+
Dynamic: project-url
|
| 44 |
+
Dynamic: requires-python
|
| 45 |
+
Dynamic: summary
|
| 46 |
+
|
| 47 |
+
PyAV
|
| 48 |
+
====
|
| 49 |
+
|
| 50 |
+
PyAV is a Pythonic binding for the [FFmpeg][ffmpeg] libraries. We aim to provide all of the power and control of the underlying library, but manage the gritty details as much as possible.
|
| 51 |
+
|
| 52 |
+
---
|
| 53 |
+
|
| 54 |
+
[![GitHub Test Status][github-tests-badge]][github-tests] [![Documentation][docs-badge]][docs] [![Python Package Index][pypi-badge]][pypi] [![Conda Forge][conda-badge]][conda]
|
| 55 |
+
|
| 56 |
+
PyAV is for direct and precise access to your media via containers, streams, packets, codecs, and frames. It exposes a few transformations of that data, and helps you get your data to/from other packages (e.g. Numpy and Pillow).
|
| 57 |
+
|
| 58 |
+
This power does come with some responsibility as working with media is horrendously complicated and PyAV can't abstract it away or make all the best decisions for you. If the `ffmpeg` command does the job without you bending over backwards, PyAV is likely going to be more of a hindrance than a help.
|
| 59 |
+
|
| 60 |
+
But where you can't work without it, PyAV is a critical tool.
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
Installation
|
| 64 |
+
------------
|
| 65 |
+
|
| 66 |
+
Due to the complexity of the dependencies, PyAV is not always the easiest Python package to install from source. Since release 8.0.0 binary wheels are provided on [PyPI][pypi] for Linux, Mac and Windows linked against a modern FFmpeg. You can install these wheels by running:
|
| 67 |
+
|
| 68 |
+
```bash
|
| 69 |
+
pip install av
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
If you want to use your existing FFmpeg, the source version of PyAV is on [PyPI][pypi] too:
|
| 73 |
+
|
| 74 |
+
```bash
|
| 75 |
+
pip install av --no-binary av
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
Installing from source is not supported on Windows.
|
| 79 |
+
|
| 80 |
+
Alternative installation methods
|
| 81 |
+
--------------------------------
|
| 82 |
+
|
| 83 |
+
Another way of installing PyAV is via [conda-forge][conda-forge]:
|
| 84 |
+
|
| 85 |
+
```bash
|
| 86 |
+
conda install av -c conda-forge
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
See the [Conda install][conda-install] docs to get started with (mini)Conda.
|
| 90 |
+
|
| 91 |
+
And if you want to build from the absolute source (POSIX only):
|
| 92 |
+
|
| 93 |
+
```bash
|
| 94 |
+
git clone https://github.com/PyAV-Org/PyAV.git
|
| 95 |
+
cd PyAV
|
| 96 |
+
source scripts/activate.sh
|
| 97 |
+
|
| 98 |
+
# Build ffmpeg from source. You can skip this step
|
| 99 |
+
# if ffmpeg is already installed.
|
| 100 |
+
./scripts/build-deps
|
| 101 |
+
|
| 102 |
+
# Build PyAV
|
| 103 |
+
make
|
| 104 |
+
|
| 105 |
+
# Testing
|
| 106 |
+
make test
|
| 107 |
+
|
| 108 |
+
# Install globally
|
| 109 |
+
deactivate
|
| 110 |
+
pip install .
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
---
|
| 114 |
+
|
| 115 |
+
Have fun, [read the docs][docs], [come chat with us][discuss], and good luck!
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
[conda-badge]: https://img.shields.io/conda/vn/conda-forge/av.svg?colorB=CCB39A
|
| 120 |
+
[conda]: https://anaconda.org/conda-forge/av
|
| 121 |
+
[docs-badge]: https://img.shields.io/badge/docs-on%20pyav.basswood--io.com-blue.svg
|
| 122 |
+
[docs]: https://pyav.basswood-io.com
|
| 123 |
+
[pypi-badge]: https://img.shields.io/pypi/v/av.svg?colorB=CCB39A
|
| 124 |
+
[pypi]: https://pypi.org/project/av
|
| 125 |
+
[discuss]: https://github.com/PyAV-Org/PyAV/discussions
|
| 126 |
+
|
| 127 |
+
[github-tests-badge]: https://github.com/PyAV-Org/PyAV/workflows/tests/badge.svg
|
| 128 |
+
[github-tests]: https://github.com/PyAV-Org/PyAV/actions?workflow=tests
|
| 129 |
+
[github]: https://github.com/PyAV-Org/PyAV
|
| 130 |
+
|
| 131 |
+
[ffmpeg]: https://ffmpeg.org/
|
| 132 |
+
[conda-forge]: https://conda-forge.github.io/
|
| 133 |
+
[conda-install]: https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html
|
vllm/lib/python3.10/site-packages/gitdb/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Initialize the object database module"""
|
| 6 |
+
|
| 7 |
+
__author__ = "Sebastian Thiel"
|
| 8 |
+
__contact__ = "byronimo@gmail.com"
|
| 9 |
+
__homepage__ = "https://github.com/gitpython-developers/gitdb"
|
| 10 |
+
version_info = (4, 0, 12)
|
| 11 |
+
__version__ = '.'.join(str(i) for i in version_info)
|
| 12 |
+
|
| 13 |
+
# default imports
|
| 14 |
+
from gitdb.base import *
|
| 15 |
+
from gitdb.db import *
|
| 16 |
+
from gitdb.stream import *
|
vllm/lib/python3.10/site-packages/gitdb/base.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Module with basic data structures - they are designed to be lightweight and fast"""
|
| 6 |
+
from gitdb.util import bin_to_hex
|
| 7 |
+
|
| 8 |
+
from gitdb.fun import (
|
| 9 |
+
type_id_to_type_map,
|
| 10 |
+
type_to_type_id_map
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
__all__ = ('OInfo', 'OPackInfo', 'ODeltaPackInfo',
|
| 14 |
+
'OStream', 'OPackStream', 'ODeltaPackStream',
|
| 15 |
+
'IStream', 'InvalidOInfo', 'InvalidOStream')
|
| 16 |
+
|
| 17 |
+
#{ ODB Bases
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class OInfo(tuple):
|
| 21 |
+
|
| 22 |
+
"""Carries information about an object in an ODB, providing information
|
| 23 |
+
about the binary sha of the object, the type_string as well as the uncompressed size
|
| 24 |
+
in bytes.
|
| 25 |
+
|
| 26 |
+
It can be accessed using tuple notation and using attribute access notation::
|
| 27 |
+
|
| 28 |
+
assert dbi[0] == dbi.binsha
|
| 29 |
+
assert dbi[1] == dbi.type
|
| 30 |
+
assert dbi[2] == dbi.size
|
| 31 |
+
|
| 32 |
+
The type is designed to be as lightweight as possible."""
|
| 33 |
+
__slots__ = tuple()
|
| 34 |
+
|
| 35 |
+
def __new__(cls, sha, type, size):
|
| 36 |
+
return tuple.__new__(cls, (sha, type, size))
|
| 37 |
+
|
| 38 |
+
def __init__(self, *args):
|
| 39 |
+
tuple.__init__(self)
|
| 40 |
+
|
| 41 |
+
#{ Interface
|
| 42 |
+
@property
|
| 43 |
+
def binsha(self):
|
| 44 |
+
""":return: our sha as binary, 20 bytes"""
|
| 45 |
+
return self[0]
|
| 46 |
+
|
| 47 |
+
@property
|
| 48 |
+
def hexsha(self):
|
| 49 |
+
""":return: our sha, hex encoded, 40 bytes"""
|
| 50 |
+
return bin_to_hex(self[0])
|
| 51 |
+
|
| 52 |
+
@property
|
| 53 |
+
def type(self):
|
| 54 |
+
return self[1]
|
| 55 |
+
|
| 56 |
+
@property
|
| 57 |
+
def type_id(self):
|
| 58 |
+
return type_to_type_id_map[self[1]]
|
| 59 |
+
|
| 60 |
+
@property
|
| 61 |
+
def size(self):
|
| 62 |
+
return self[2]
|
| 63 |
+
#} END interface
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class OPackInfo(tuple):
|
| 67 |
+
|
| 68 |
+
"""As OInfo, but provides a type_id property to retrieve the numerical type id, and
|
| 69 |
+
does not include a sha.
|
| 70 |
+
|
| 71 |
+
Additionally, the pack_offset is the absolute offset into the packfile at which
|
| 72 |
+
all object information is located. The data_offset property points to the absolute
|
| 73 |
+
location in the pack at which that actual data stream can be found."""
|
| 74 |
+
__slots__ = tuple()
|
| 75 |
+
|
| 76 |
+
def __new__(cls, packoffset, type, size):
|
| 77 |
+
return tuple.__new__(cls, (packoffset, type, size))
|
| 78 |
+
|
| 79 |
+
def __init__(self, *args):
|
| 80 |
+
tuple.__init__(self)
|
| 81 |
+
|
| 82 |
+
#{ Interface
|
| 83 |
+
|
| 84 |
+
@property
|
| 85 |
+
def pack_offset(self):
|
| 86 |
+
return self[0]
|
| 87 |
+
|
| 88 |
+
@property
|
| 89 |
+
def type(self):
|
| 90 |
+
return type_id_to_type_map[self[1]]
|
| 91 |
+
|
| 92 |
+
@property
|
| 93 |
+
def type_id(self):
|
| 94 |
+
return self[1]
|
| 95 |
+
|
| 96 |
+
@property
|
| 97 |
+
def size(self):
|
| 98 |
+
return self[2]
|
| 99 |
+
|
| 100 |
+
#} END interface
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class ODeltaPackInfo(OPackInfo):
|
| 104 |
+
|
| 105 |
+
"""Adds delta specific information,
|
| 106 |
+
Either the 20 byte sha which points to some object in the database,
|
| 107 |
+
or the negative offset from the pack_offset, so that pack_offset - delta_info yields
|
| 108 |
+
the pack offset of the base object"""
|
| 109 |
+
__slots__ = tuple()
|
| 110 |
+
|
| 111 |
+
def __new__(cls, packoffset, type, size, delta_info):
|
| 112 |
+
return tuple.__new__(cls, (packoffset, type, size, delta_info))
|
| 113 |
+
|
| 114 |
+
#{ Interface
|
| 115 |
+
@property
|
| 116 |
+
def delta_info(self):
|
| 117 |
+
return self[3]
|
| 118 |
+
#} END interface
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class OStream(OInfo):
|
| 122 |
+
|
| 123 |
+
"""Base for object streams retrieved from the database, providing additional
|
| 124 |
+
information about the stream.
|
| 125 |
+
Generally, ODB streams are read-only as objects are immutable"""
|
| 126 |
+
__slots__ = tuple()
|
| 127 |
+
|
| 128 |
+
def __new__(cls, sha, type, size, stream, *args, **kwargs):
|
| 129 |
+
"""Helps with the initialization of subclasses"""
|
| 130 |
+
return tuple.__new__(cls, (sha, type, size, stream))
|
| 131 |
+
|
| 132 |
+
def __init__(self, *args, **kwargs):
|
| 133 |
+
tuple.__init__(self)
|
| 134 |
+
|
| 135 |
+
#{ Stream Reader Interface
|
| 136 |
+
|
| 137 |
+
def read(self, size=-1):
|
| 138 |
+
return self[3].read(size)
|
| 139 |
+
|
| 140 |
+
@property
|
| 141 |
+
def stream(self):
|
| 142 |
+
return self[3]
|
| 143 |
+
|
| 144 |
+
#} END stream reader interface
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class ODeltaStream(OStream):
|
| 148 |
+
|
| 149 |
+
"""Uses size info of its stream, delaying reads"""
|
| 150 |
+
|
| 151 |
+
def __new__(cls, sha, type, size, stream, *args, **kwargs):
|
| 152 |
+
"""Helps with the initialization of subclasses"""
|
| 153 |
+
return tuple.__new__(cls, (sha, type, size, stream))
|
| 154 |
+
|
| 155 |
+
#{ Stream Reader Interface
|
| 156 |
+
|
| 157 |
+
@property
|
| 158 |
+
def size(self):
|
| 159 |
+
return self[3].size
|
| 160 |
+
|
| 161 |
+
#} END stream reader interface
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class OPackStream(OPackInfo):
|
| 165 |
+
|
| 166 |
+
"""Next to pack object information, a stream outputting an undeltified base object
|
| 167 |
+
is provided"""
|
| 168 |
+
__slots__ = tuple()
|
| 169 |
+
|
| 170 |
+
def __new__(cls, packoffset, type, size, stream, *args):
|
| 171 |
+
"""Helps with the initialization of subclasses"""
|
| 172 |
+
return tuple.__new__(cls, (packoffset, type, size, stream))
|
| 173 |
+
|
| 174 |
+
#{ Stream Reader Interface
|
| 175 |
+
def read(self, size=-1):
|
| 176 |
+
return self[3].read(size)
|
| 177 |
+
|
| 178 |
+
@property
|
| 179 |
+
def stream(self):
|
| 180 |
+
return self[3]
|
| 181 |
+
#} END stream reader interface
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class ODeltaPackStream(ODeltaPackInfo):
|
| 185 |
+
|
| 186 |
+
"""Provides a stream outputting the uncompressed offset delta information"""
|
| 187 |
+
__slots__ = tuple()
|
| 188 |
+
|
| 189 |
+
def __new__(cls, packoffset, type, size, delta_info, stream):
|
| 190 |
+
return tuple.__new__(cls, (packoffset, type, size, delta_info, stream))
|
| 191 |
+
|
| 192 |
+
#{ Stream Reader Interface
|
| 193 |
+
def read(self, size=-1):
|
| 194 |
+
return self[4].read(size)
|
| 195 |
+
|
| 196 |
+
@property
|
| 197 |
+
def stream(self):
|
| 198 |
+
return self[4]
|
| 199 |
+
#} END stream reader interface
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
class IStream(list):
|
| 203 |
+
|
| 204 |
+
"""Represents an input content stream to be fed into the ODB. It is mutable to allow
|
| 205 |
+
the ODB to record information about the operations outcome right in this instance.
|
| 206 |
+
|
| 207 |
+
It provides interfaces for the OStream and a StreamReader to allow the instance
|
| 208 |
+
to blend in without prior conversion.
|
| 209 |
+
|
| 210 |
+
The only method your content stream must support is 'read'"""
|
| 211 |
+
__slots__ = tuple()
|
| 212 |
+
|
| 213 |
+
def __new__(cls, type, size, stream, sha=None):
|
| 214 |
+
return list.__new__(cls, (sha, type, size, stream, None))
|
| 215 |
+
|
| 216 |
+
def __init__(self, type, size, stream, sha=None):
|
| 217 |
+
list.__init__(self, (sha, type, size, stream, None))
|
| 218 |
+
|
| 219 |
+
#{ Interface
|
| 220 |
+
@property
|
| 221 |
+
def hexsha(self):
|
| 222 |
+
""":return: our sha, hex encoded, 40 bytes"""
|
| 223 |
+
return bin_to_hex(self[0])
|
| 224 |
+
|
| 225 |
+
def _error(self):
|
| 226 |
+
""":return: the error that occurred when processing the stream, or None"""
|
| 227 |
+
return self[4]
|
| 228 |
+
|
| 229 |
+
def _set_error(self, exc):
|
| 230 |
+
"""Set this input stream to the given exc, may be None to reset the error"""
|
| 231 |
+
self[4] = exc
|
| 232 |
+
|
| 233 |
+
error = property(_error, _set_error)
|
| 234 |
+
|
| 235 |
+
#} END interface
|
| 236 |
+
|
| 237 |
+
#{ Stream Reader Interface
|
| 238 |
+
|
| 239 |
+
def read(self, size=-1):
|
| 240 |
+
"""Implements a simple stream reader interface, passing the read call on
|
| 241 |
+
to our internal stream"""
|
| 242 |
+
return self[3].read(size)
|
| 243 |
+
|
| 244 |
+
#} END stream reader interface
|
| 245 |
+
|
| 246 |
+
#{ interface
|
| 247 |
+
|
| 248 |
+
def _set_binsha(self, binsha):
|
| 249 |
+
self[0] = binsha
|
| 250 |
+
|
| 251 |
+
def _binsha(self):
|
| 252 |
+
return self[0]
|
| 253 |
+
|
| 254 |
+
binsha = property(_binsha, _set_binsha)
|
| 255 |
+
|
| 256 |
+
def _type(self):
|
| 257 |
+
return self[1]
|
| 258 |
+
|
| 259 |
+
def _set_type(self, type):
|
| 260 |
+
self[1] = type
|
| 261 |
+
|
| 262 |
+
type = property(_type, _set_type)
|
| 263 |
+
|
| 264 |
+
def _size(self):
|
| 265 |
+
return self[2]
|
| 266 |
+
|
| 267 |
+
def _set_size(self, size):
|
| 268 |
+
self[2] = size
|
| 269 |
+
|
| 270 |
+
size = property(_size, _set_size)
|
| 271 |
+
|
| 272 |
+
def _stream(self):
|
| 273 |
+
return self[3]
|
| 274 |
+
|
| 275 |
+
def _set_stream(self, stream):
|
| 276 |
+
self[3] = stream
|
| 277 |
+
|
| 278 |
+
stream = property(_stream, _set_stream)
|
| 279 |
+
|
| 280 |
+
#} END odb info interface
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class InvalidOInfo(tuple):
|
| 284 |
+
|
| 285 |
+
"""Carries information about a sha identifying an object which is invalid in
|
| 286 |
+
the queried database. The exception attribute provides more information about
|
| 287 |
+
the cause of the issue"""
|
| 288 |
+
__slots__ = tuple()
|
| 289 |
+
|
| 290 |
+
def __new__(cls, sha, exc):
|
| 291 |
+
return tuple.__new__(cls, (sha, exc))
|
| 292 |
+
|
| 293 |
+
def __init__(self, sha, exc):
|
| 294 |
+
tuple.__init__(self, (sha, exc))
|
| 295 |
+
|
| 296 |
+
@property
|
| 297 |
+
def binsha(self):
|
| 298 |
+
return self[0]
|
| 299 |
+
|
| 300 |
+
@property
|
| 301 |
+
def hexsha(self):
|
| 302 |
+
return bin_to_hex(self[0])
|
| 303 |
+
|
| 304 |
+
@property
|
| 305 |
+
def error(self):
|
| 306 |
+
""":return: exception instance explaining the failure"""
|
| 307 |
+
return self[1]
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class InvalidOStream(InvalidOInfo):
|
| 311 |
+
|
| 312 |
+
"""Carries information about an invalid ODB stream"""
|
| 313 |
+
__slots__ = tuple()
|
| 314 |
+
|
| 315 |
+
#} END ODB Bases
|
vllm/lib/python3.10/site-packages/gitdb/const.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BYTE_SPACE = b' '
|
| 2 |
+
NULL_BYTE = b'\0'
|
| 3 |
+
NULL_HEX_SHA = "0" * 40
|
| 4 |
+
NULL_BIN_SHA = NULL_BYTE * 20
|
vllm/lib/python3.10/site-packages/gitdb/db/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
|
| 6 |
+
from gitdb.db.base import *
|
| 7 |
+
from gitdb.db.loose import *
|
| 8 |
+
from gitdb.db.mem import *
|
| 9 |
+
from gitdb.db.pack import *
|
| 10 |
+
from gitdb.db.git import *
|
| 11 |
+
from gitdb.db.ref import *
|
vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (9.68 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc
ADDED
|
Binary file (6.13 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/db/base.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Contains implementations of database retrieveing objects"""
|
| 6 |
+
from gitdb.util import (
|
| 7 |
+
join,
|
| 8 |
+
LazyMixin,
|
| 9 |
+
hex_to_bin
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from gitdb.utils.encoding import force_text
|
| 13 |
+
from gitdb.exc import (
|
| 14 |
+
BadObject,
|
| 15 |
+
AmbiguousObjectName
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
from itertools import chain
|
| 19 |
+
from functools import reduce
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
__all__ = ('ObjectDBR', 'ObjectDBW', 'FileDBBase', 'CompoundDB', 'CachingDB')
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ObjectDBR:
|
| 26 |
+
|
| 27 |
+
"""Defines an interface for object database lookup.
|
| 28 |
+
Objects are identified either by their 20 byte bin sha"""
|
| 29 |
+
|
| 30 |
+
def __contains__(self, sha):
|
| 31 |
+
return self.has_obj
|
| 32 |
+
|
| 33 |
+
#{ Query Interface
|
| 34 |
+
def has_object(self, sha):
|
| 35 |
+
"""
|
| 36 |
+
Whether the object identified by the given 20 bytes
|
| 37 |
+
binary sha is contained in the database
|
| 38 |
+
|
| 39 |
+
:return: True if the object identified by the given 20 bytes
|
| 40 |
+
binary sha is contained in the database"""
|
| 41 |
+
raise NotImplementedError("To be implemented in subclass")
|
| 42 |
+
|
| 43 |
+
def info(self, sha):
|
| 44 |
+
""" :return: OInfo instance
|
| 45 |
+
:param sha: bytes binary sha
|
| 46 |
+
:raise BadObject:"""
|
| 47 |
+
raise NotImplementedError("To be implemented in subclass")
|
| 48 |
+
|
| 49 |
+
def stream(self, sha):
|
| 50 |
+
""":return: OStream instance
|
| 51 |
+
:param sha: 20 bytes binary sha
|
| 52 |
+
:raise BadObject:"""
|
| 53 |
+
raise NotImplementedError("To be implemented in subclass")
|
| 54 |
+
|
| 55 |
+
def size(self):
|
| 56 |
+
""":return: amount of objects in this database"""
|
| 57 |
+
raise NotImplementedError()
|
| 58 |
+
|
| 59 |
+
def sha_iter(self):
|
| 60 |
+
"""Return iterator yielding 20 byte shas for all objects in this data base"""
|
| 61 |
+
raise NotImplementedError()
|
| 62 |
+
|
| 63 |
+
#} END query interface
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class ObjectDBW:
|
| 67 |
+
|
| 68 |
+
"""Defines an interface to create objects in the database"""
|
| 69 |
+
|
| 70 |
+
def __init__(self, *args, **kwargs):
|
| 71 |
+
self._ostream = None
|
| 72 |
+
|
| 73 |
+
#{ Edit Interface
|
| 74 |
+
def set_ostream(self, stream):
|
| 75 |
+
"""
|
| 76 |
+
Adjusts the stream to which all data should be sent when storing new objects
|
| 77 |
+
|
| 78 |
+
:param stream: if not None, the stream to use, if None the default stream
|
| 79 |
+
will be used.
|
| 80 |
+
:return: previously installed stream, or None if there was no override
|
| 81 |
+
:raise TypeError: if the stream doesn't have the supported functionality"""
|
| 82 |
+
cstream = self._ostream
|
| 83 |
+
self._ostream = stream
|
| 84 |
+
return cstream
|
| 85 |
+
|
| 86 |
+
def ostream(self):
|
| 87 |
+
"""
|
| 88 |
+
Return the output stream
|
| 89 |
+
|
| 90 |
+
:return: overridden output stream this instance will write to, or None
|
| 91 |
+
if it will write to the default stream"""
|
| 92 |
+
return self._ostream
|
| 93 |
+
|
| 94 |
+
def store(self, istream):
|
| 95 |
+
"""
|
| 96 |
+
Create a new object in the database
|
| 97 |
+
:return: the input istream object with its sha set to its corresponding value
|
| 98 |
+
|
| 99 |
+
:param istream: IStream compatible instance. If its sha is already set
|
| 100 |
+
to a value, the object will just be stored in the our database format,
|
| 101 |
+
in which case the input stream is expected to be in object format ( header + contents ).
|
| 102 |
+
:raise IOError: if data could not be written"""
|
| 103 |
+
raise NotImplementedError("To be implemented in subclass")
|
| 104 |
+
|
| 105 |
+
#} END edit interface
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class FileDBBase:
|
| 109 |
+
|
| 110 |
+
"""Provides basic facilities to retrieve files of interest, including
|
| 111 |
+
caching facilities to help mapping hexsha's to objects"""
|
| 112 |
+
|
| 113 |
+
def __init__(self, root_path):
|
| 114 |
+
"""Initialize this instance to look for its files at the given root path
|
| 115 |
+
All subsequent operations will be relative to this path
|
| 116 |
+
:raise InvalidDBRoot:
|
| 117 |
+
**Note:** The base will not perform any accessablity checking as the base
|
| 118 |
+
might not yet be accessible, but become accessible before the first
|
| 119 |
+
access."""
|
| 120 |
+
super().__init__()
|
| 121 |
+
self._root_path = root_path
|
| 122 |
+
|
| 123 |
+
#{ Interface
|
| 124 |
+
def root_path(self):
|
| 125 |
+
""":return: path at which this db operates"""
|
| 126 |
+
return self._root_path
|
| 127 |
+
|
| 128 |
+
def db_path(self, rela_path):
|
| 129 |
+
"""
|
| 130 |
+
:return: the given relative path relative to our database root, allowing
|
| 131 |
+
to pontentially access datafiles"""
|
| 132 |
+
return join(self._root_path, force_text(rela_path))
|
| 133 |
+
#} END interface
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class CachingDB:
|
| 137 |
+
|
| 138 |
+
"""A database which uses caches to speed-up access"""
|
| 139 |
+
|
| 140 |
+
#{ Interface
|
| 141 |
+
def update_cache(self, force=False):
|
| 142 |
+
"""
|
| 143 |
+
Call this method if the underlying data changed to trigger an update
|
| 144 |
+
of the internal caching structures.
|
| 145 |
+
|
| 146 |
+
:param force: if True, the update must be performed. Otherwise the implementation
|
| 147 |
+
may decide not to perform an update if it thinks nothing has changed.
|
| 148 |
+
:return: True if an update was performed as something change indeed"""
|
| 149 |
+
|
| 150 |
+
# END interface
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _databases_recursive(database, output):
|
| 154 |
+
"""Fill output list with database from db, in order. Deals with Loose, Packed
|
| 155 |
+
and compound databases."""
|
| 156 |
+
if isinstance(database, CompoundDB):
|
| 157 |
+
dbs = database.databases()
|
| 158 |
+
output.extend(db for db in dbs if not isinstance(db, CompoundDB))
|
| 159 |
+
for cdb in (db for db in dbs if isinstance(db, CompoundDB)):
|
| 160 |
+
_databases_recursive(cdb, output)
|
| 161 |
+
else:
|
| 162 |
+
output.append(database)
|
| 163 |
+
# END handle database type
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class CompoundDB(ObjectDBR, LazyMixin, CachingDB):

    """A database which delegates read queries to a list of sub-databases.

    The sub-databases are stored in the lazily loaded ``_dbs`` attribute.
    Subclasses define ``_set_cache_`` to populate it with their databases."""

    def _set_cache_(self, attr):
        # Lazily create the sub-database list and the sha -> database cache.
        if attr == '_dbs':
            self._dbs = list()
        elif attr == '_db_cache':
            self._db_cache = dict()
        else:
            super()._set_cache_(attr)

    def _db_query(self, sha):
        """:return: database containing the given 20 byte sha
        :raise BadObject: if no sub-database contains the sha"""
        # most databases use binary representations, prevent converting
        # it every time a database is being queried
        try:
            return self._db_cache[sha]
        except KeyError:
            pass
        # END first level cache

        for db in self._dbs:
            if db.has_object(sha):
                self._db_cache[sha] = db
                return db
        # END for each database
        raise BadObject(sha)

    #{ ObjectDBR interface

    def has_object(self, sha):
        """:return: True if any of our sub-databases contains the given sha"""
        try:
            self._db_query(sha)
            return True
        except BadObject:
            return False
        # END handle exceptions

    def info(self, sha):
        """:return: object information, delegated to the owning sub-database"""
        return self._db_query(sha).info(sha)

    def stream(self, sha):
        """:return: object stream, delegated to the owning sub-database"""
        return self._db_query(sha).stream(sha)

    def size(self):
        """:return: total size of all contained databases"""
        # builtin sum() replaces the former reduce(lambda x, y: x + y, ...)
        return sum(db.size() for db in self._dbs)

    def sha_iter(self):
        """:return: iterator over the shas of all contained databases"""
        # chain.from_iterable streams lazily instead of unpacking all
        # sub-iterators up-front as chain(*(...)) did
        return chain.from_iterable(db.sha_iter() for db in self._dbs)

    #} END object DBR Interface

    #{ Interface

    def databases(self):
        """:return: tuple of database instances we use for lookups"""
        return tuple(self._dbs)

    def update_cache(self, force=False):
        """Trigger a cache update in every caching sub-database.

        :param force: passed through to each sub-database
        :return: True if at least one sub-database reported new information"""
        # something might have changed, clear everything
        self._db_cache.clear()
        stat = False
        for db in self._dbs:
            if isinstance(db, CachingDB):
                stat |= db.update_cache(force)
            # END if is caching db
        # END for each database to update
        return stat

    def partial_to_complete_sha_hex(self, partial_hexsha):
        """
        :return: 20 byte binary sha1 from the given less-than-40 byte hexsha (bytes or str)
        :param partial_hexsha: hexsha with less than 40 byte
        :raise AmbiguousObjectName: if more than one object matches the prefix
        :raise BadObject: if no object matches the prefix"""
        databases = list()
        _databases_recursive(self, databases)
        partial_hexsha = force_text(partial_hexsha)
        len_partial_hexsha = len(partial_hexsha)
        # pad odd-length prefixes so the hex -> binary conversion succeeds
        if len_partial_hexsha % 2 != 0:
            partial_binsha = hex_to_bin(partial_hexsha + "0")
        else:
            partial_binsha = hex_to_bin(partial_hexsha)
        # END assure successful binary conversion

        candidate = None
        for db in databases:
            full_bin_sha = None
            try:
                # compound databases resolve hex prefixes directly, plain
                # ones need the binary prefix plus the canonical length
                if hasattr(db, 'partial_to_complete_sha_hex'):
                    full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
                else:
                    full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
                # END handle database type
            except BadObject:
                continue
            # END ignore bad objects
            if full_bin_sha:
                if candidate and candidate != full_bin_sha:
                    raise AmbiguousObjectName(partial_hexsha)
                candidate = full_bin_sha
            # END handle candidate
        # END for each db
        if not candidate:
            raise BadObject(partial_binsha)
        return candidate

    #} END interface
|
vllm/lib/python3.10/site-packages/gitdb/db/git.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
from gitdb.db.base import (
|
| 6 |
+
CompoundDB,
|
| 7 |
+
ObjectDBW,
|
| 8 |
+
FileDBBase
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
from gitdb.db.loose import LooseObjectDB
|
| 12 |
+
from gitdb.db.pack import PackedDB
|
| 13 |
+
from gitdb.db.ref import ReferenceDB
|
| 14 |
+
|
| 15 |
+
from gitdb.exc import InvalidDBRoot
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
__all__ = ('GitDB', )
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class GitDB(FileDBBase, ObjectDBW, CompoundDB):

    """A git-style object database keeping all objects in the 'objects'
    subdirectory.

    ``IMPORTANT``: The usage of this implementation is highly discouraged as it fails to release file-handles.
    This can be a problem with long-running processes and/or big repositories.
    """
    # Configuration: sub-database implementations, overridable by subclasses
    PackDBCls = PackedDB
    LooseDBCls = LooseObjectDB
    ReferenceDBCls = ReferenceDB

    # Directories, relative to our database root
    packs_dir = 'pack'
    loose_dir = ''
    alternates_dir = os.path.join('info', 'alternates')

    def __init__(self, root_path):
        """Initialize ourselves on a git objects directory"""
        super().__init__(root_path)

    def _set_cache_(self, attr):
        if attr not in ('_dbs', '_loose_db'):
            super()._set_cache_(attr)
            return
        # END delegate unknown attributes

        self._dbs = list()
        loose_db = None
        subdb_specs = (
            (self.packs_dir, self.PackDBCls),
            (self.loose_dir, self.LooseDBCls),
            (self.alternates_dir, self.ReferenceDBCls),
        )
        for subpath, dbcls in subdb_specs:
            path = self.db_path(subpath)
            if not os.path.exists(path):
                continue
            # END skip missing directories
            db = dbcls(path)
            self._dbs.append(db)
            if dbcls is self.LooseDBCls:
                loose_db = db
            # END remember loose db
        # END for each db type

        # should have at least one subdb
        if not self._dbs:
            raise InvalidDBRoot(self.root_path())
        # END handle error

        # the loose database must provide write support
        assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"

        # finally set the value
        self._loose_db = loose_db

    #{ ObjectDBW interface

    def store(self, istream):
        """Write the object in istream to our loose object database"""
        return self._loose_db.store(istream)

    def ostream(self):
        """:return: output stream of the loose object database"""
        return self._loose_db.ostream()

    def set_ostream(self, ostream):
        """Set the output stream of our loose object database"""
        return self._loose_db.set_ostream(ostream)

    #} END objectdbw interface
|
vllm/lib/python3.10/site-packages/gitdb/db/mem.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Contains the MemoryDatabase implementation"""
|
| 6 |
+
from gitdb.db.loose import LooseObjectDB
|
| 7 |
+
from gitdb.db.base import (
|
| 8 |
+
ObjectDBR,
|
| 9 |
+
ObjectDBW
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from gitdb.base import (
|
| 13 |
+
OStream,
|
| 14 |
+
IStream,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
from gitdb.exc import (
|
| 18 |
+
BadObject,
|
| 19 |
+
UnsupportedOperation
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
from gitdb.stream import (
|
| 23 |
+
ZippedStoreShaWriter,
|
| 24 |
+
DecompressMemMapReader,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
from io import BytesIO
|
| 28 |
+
|
| 29 |
+
__all__ = ("MemoryDB", )
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class MemoryDB(ObjectDBR, ObjectDBW):

    """An in-memory object database providing fast IO and object retrieval.

    It should be used to buffer results and obtain SHAs before writing them
    to the actual physical storage, as it allows to query whether an object
    already exists in the target storage before introducing actual IO"""

    def __init__(self):
        super().__init__()
        # the loose-db machinery is reused purely for its object serialization
        self._db = LooseObjectDB("path/doesnt/matter")

        # maps 20 byte shas to their OStream objects
        self._cache = dict()

    def set_ostream(self, stream):
        raise UnsupportedOperation("MemoryDB's always stream into memory")

    def store(self, istream):
        """Compress the object in istream into our cache, returning the
        istream with its sha set"""
        writer = ZippedStoreShaWriter()
        self._db.set_ostream(writer)

        istream = self._db.store(istream)
        writer.close()  # close to flush
        writer.seek(0)

        # don't provide a size, the stream is written in object format, hence the
        # header needs decompression
        reader = DecompressMemMapReader(writer.getvalue(), close_on_deletion=False)
        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, reader)

        return istream

    def has_object(self, sha):
        return sha in self._cache

    def info(self, sha):
        # we always return streams, which are infos as well
        return self.stream(sha)

    def stream(self, sha):
        """:return: cached OStream, rewound so the next consumer can read it
        :raise BadObject: if the sha is not in our cache"""
        try:
            ostream = self._cache[sha]
        except KeyError as e:
            raise BadObject(sha) from e
        # END exception handling
        # rewind stream for the next one to read
        ostream.stream.seek(0)
        return ostream

    def size(self):
        return len(self._cache)

    def sha_iter(self):
        return self._cache.keys()

    #{ Interface
    def stream_copy(self, sha_iter, odb):
        """Copy the streams as identified by sha's yielded by sha_iter into the given odb
        The streams will be copied directly
        **Note:** the object will only be written if it did not exist in the target db

        :return: amount of streams actually copied into odb. If smaller than the amount
            of input shas, one or more objects did already exist in odb"""
        count = 0
        for sha in sha_iter:
            if odb.has_object(sha):
                continue
            # END check object existence

            ostream = self.stream(sha)
            # compressed data including header
            sio = BytesIO(ostream.stream.data())
            istream = IStream(ostream.type, ostream.size, sio, sha)

            odb.store(istream)
            count += 1
        # END for each sha
        return count
    #} END interface
|
vllm/lib/python3.10/site-packages/gitdb/db/pack.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Module containing a database to deal with packs"""
|
| 6 |
+
from gitdb.db.base import (
|
| 7 |
+
FileDBBase,
|
| 8 |
+
ObjectDBR,
|
| 9 |
+
CachingDB
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from gitdb.util import LazyMixin
|
| 13 |
+
|
| 14 |
+
from gitdb.exc import (
|
| 15 |
+
BadObject,
|
| 16 |
+
UnsupportedOperation,
|
| 17 |
+
AmbiguousObjectName
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
from gitdb.pack import PackEntity
|
| 21 |
+
|
| 22 |
+
from functools import reduce
|
| 23 |
+
|
| 24 |
+
import os
|
| 25 |
+
import glob
|
| 26 |
+
|
| 27 |
+
__all__ = ('PackedDB', )
|
| 28 |
+
|
| 29 |
+
#{ Utilities
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class PackedDB(FileDBBase, ObjectDBR, CachingDB, LazyMixin):

    """A database operating on a set of object packs"""

    # sort the priority list every N queries
    # Higher values are better, performance tests don't show this has
    # any effect, but it should have one
    _sort_interval = 500

    def __init__(self, root_path):
        super().__init__(root_path)
        # list of lists with three items:
        # * hits - number of times the pack was hit with a request
        # * entity - Pack entity instance
        # * sha_to_index - PackIndexFile.sha_to_index method for direct cache query
        # self._entities = list() # lazy loaded list
        self._hit_count = 0             # amount of hits
        self._st_mtime = 0              # last modification data of our root path

    def _set_cache_(self, attr):
        # lazily build the entity list from the packs currently on disk
        if attr == '_entities':
            self._entities = list()
            self.update_cache(force=True)
        # END handle entities initialization

    def _sort_entities(self):
        # most frequently hit packs first, so hot objects are found quickly
        self._entities.sort(key=lambda l: l[0], reverse=True)

    def _pack_info(self, sha):
        """:return: tuple(entity, index) for an item at the given sha
        :param sha: 20 or 40 byte sha
        :raise BadObject:
        **Note:** This method is not thread-safe, but may be hit in multi-threaded
            operation. The worst thing that can happen though is a counter that
            was not incremented, or the list being in wrong order. So we safe
            the time for locking here, lets see how that goes"""
        # presort ?
        if self._hit_count % self._sort_interval == 0:
            self._sort_entities()
        # END update sorting

        for item in self._entities:
            index = item[2](sha)
            if index is not None:
                item[0] += 1            # one hit for you
                self._hit_count += 1    # general hit count
                return (item[1], index)
            # END index found in pack
        # END for each item

        # no hit, see whether we have to update packs
        # NOTE: considering packs don't change very often, we safe this call
        # and leave it to the super-caller to trigger that
        raise BadObject(sha)

    #{ Object DB Read

    def has_object(self, sha):
        """:return: True if any of our packs contains the given sha"""
        try:
            self._pack_info(sha)
            return True
        except BadObject:
            return False
        # END exception handling

    def info(self, sha):
        """:return: object information from the pack containing the sha"""
        entity, index = self._pack_info(sha)
        return entity.info_at_index(index)

    def stream(self, sha):
        """:return: object stream from the pack containing the sha"""
        entity, index = self._pack_info(sha)
        return entity.stream_at_index(index)

    def sha_iter(self):
        """:return: iterator yielding the binary sha of every object in every pack"""
        for entity in self.entities():
            # keep the index file in its own name - the original code
            # shadowed it with the loop counter, which worked only because
            # index.size() was evaluated before the rebinding
            index_file = entity.index()
            sha_by_index = index_file.sha
            for entry in range(index_file.size()):
                yield sha_by_index(entry)
            # END for each index
        # END for each entity

    def size(self):
        """:return: total amount of objects in all of our packs"""
        # builtin sum() replaces the former reduce(lambda x, y: x + y, ...)
        return sum(item[1].index().size() for item in self._entities)

    #} END object db read

    #{ object db write

    def store(self, istream):
        """Storing individual objects is not feasible as a pack is designed to
        hold multiple objects. Writing or rewriting packs for single objects is
        inefficient"""
        raise UnsupportedOperation()

    #} END object db write

    #{ Interface

    def update_cache(self, force=False):
        """
        Update our cache with the actually existing packs on disk. Add new ones,
        and remove deleted ones. We keep the unchanged ones

        :param force: If True, the cache will be updated even though the directory
            does not appear to have changed according to its modification timestamp.
        :return: True if the packs have been updated so there is new information,
            False if there was no change to the pack database"""
        stat = os.stat(self.root_path())
        if not force and stat.st_mtime <= self._st_mtime:
            return False
        # END abort early on no change
        self._st_mtime = stat.st_mtime

        # packs are supposed to be prefixed with pack- by git-convention
        # get all pack files, figure out what changed
        pack_files = set(glob.glob(os.path.join(self.root_path(), "pack-*.pack")))
        our_pack_files = {item[1].pack().path() for item in self._entities}

        # new packs
        for pack_file in (pack_files - our_pack_files):
            # init the hit-counter/priority with the size, a good measure for hit-
            # probability. Its implemented so that only 12 bytes will be read
            entity = PackEntity(pack_file)
            self._entities.append([entity.pack().size(), entity, entity.index().sha_to_index])
        # END for each new packfile

        # removed packs
        for pack_file in (our_pack_files - pack_files):
            del_index = -1
            for i, item in enumerate(self._entities):
                if item[1].pack().path() == pack_file:
                    del_index = i
                    break
                # END found index
            # END for each entity
            assert del_index != -1
            del(self._entities[del_index])
        # END for each removed pack

        # reinitialize priorities
        self._sort_entities()
        return True

    def entities(self):
        """:return: list of pack entities operated upon by this database"""
        return [item[1] for item in self._entities]

    def partial_to_complete_sha(self, partial_binsha, canonical_length):
        """:return: 20 byte sha as inferred by the given partial binary sha
        :param partial_binsha: binary sha with less than 20 bytes
        :param canonical_length: length of the corresponding canonical representation.
            It is required as binary sha's cannot display whether the original hex sha
            had an odd or even number of characters
        :raise AmbiguousObjectName:
        :raise BadObject: """
        candidate = None
        for item in self._entities:
            item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
            if item_index is not None:
                sha = item[1].index().sha(item_index)
                if candidate and candidate != sha:
                    raise AmbiguousObjectName(partial_binsha)
                candidate = sha
            # END handle full sha could be found
        # END for each entity

        if candidate:
            return candidate

        # still not found ?
        raise BadObject(partial_binsha)

    #} END interface
|
vllm/lib/python3.10/site-packages/gitdb/db/ref.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
import codecs
|
| 6 |
+
from gitdb.db.base import (
|
| 7 |
+
CompoundDB,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
__all__ = ('ReferenceDB', )
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ReferenceDB(CompoundDB):

    """A database consisting of databases referred to in a file"""

    # Configuration
    # Specifies the object database to use for the paths found in the alternates
    # file. If None, it defaults to the GitDB
    ObjectDBCls = None

    def __init__(self, ref_file):
        super().__init__()
        self._ref_file = ref_file

    def _set_cache_(self, attr):
        if attr == '_dbs':
            self._dbs = list()
            self._update_dbs_from_ref_file()
        else:
            super()._set_cache_(attr)
        # END handle attrs

    def _update_dbs_from_ref_file(self):
        """Re-read the reference file and synchronize our database list with it.

        Paths that vanished from the file are dropped, new ones are appended
        in file order; unreadable or invalid paths are silently skipped."""
        dbcls = self.ObjectDBCls
        if dbcls is None:
            # late import to avoid a cyclic dependency at module load time
            from gitdb.db.git import GitDB
            dbcls = GitDB
        # END get db type

        # try to get as many as possible, don't fail if some are unavailable
        ref_paths = list()
        try:
            with codecs.open(self._ref_file, 'r', encoding="utf-8") as f:
                ref_paths = [l.strip() for l in f]
        except OSError:
            pass
        # END handle alternates

        ref_paths_set = set(ref_paths)
        cur_ref_paths_set = {db.root_path() for db in self._dbs}

        # remove databases whose path is no longer referenced.
        # BUGFIX: the previous implementation deleted by index while scanning
        # a copy of the list, so after the first deletion the copy's indices
        # no longer matched the live list and the wrong entry could be
        # removed; rebuilding the list avoids any index shifting.
        stale_paths = cur_ref_paths_set - ref_paths_set
        if stale_paths:
            self._dbs = [db for db in self._dbs if db.root_path() not in stale_paths]
        # END remove stale databases

        # add new databases
        # sort them to maintain the order from the ref file
        added_paths = sorted(ref_paths_set - cur_ref_paths_set, key=lambda p: ref_paths.index(p))
        for path in added_paths:
            try:
                db = dbcls(path)
                # force an update to verify path
                if isinstance(db, CompoundDB):
                    db.databases()
                # END verification
                self._dbs.append(db)
            except Exception:
                # ignore invalid paths or issues
                pass
        # END for each path to add

    def update_cache(self, force=False):
        # re-read alternates and update databases
        self._update_dbs_from_ref_file()
        return super().update_cache(force)
|
vllm/lib/python3.10/site-packages/gitdb/exc.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Module with common exceptions"""
|
| 6 |
+
from gitdb.util import to_hex_sha
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
'AmbiguousObjectName',
|
| 10 |
+
'BadName',
|
| 11 |
+
'BadObject',
|
| 12 |
+
'BadObjectType',
|
| 13 |
+
'InvalidDBRoot',
|
| 14 |
+
'ODBError',
|
| 15 |
+
'ParseError',
|
| 16 |
+
'UnsupportedOperation',
|
| 17 |
+
'to_hex_sha',
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
class ODBError(Exception):
    """Base class for every error raised by the object database."""
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class InvalidDBRoot(ODBError):
    """Raised when an object database cannot be initialized at the given path."""
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class BadObject(ODBError):
    """Raised when the object with the given SHA does not exist.

    Instantiate with the sha that failed to resolve."""

    def __str__(self):
        return f"BadObject: {to_hex_sha(self.args[0])}"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class BadName(ODBError):
    """Raised when a name provided to rev_parse wasn't understood."""

    def __str__(self):
        return f"Ref '{self.args[0]}' did not resolve to an object"
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class ParseError(ODBError):
    """Raised when parsing a file failed due to an invalid format."""
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class AmbiguousObjectName(ODBError):
    """Raised when a possibly shortened name does not uniquely represent a
    single object in the database."""
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class BadObjectType(ODBError):
    """Raised when an object had an unsupported type."""
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class UnsupportedOperation(ODBError):
    """Raised when the requested operation cannot be supported by the
    object database."""
|
vllm/lib/python3.10/site-packages/gitdb/fun.py
ADDED
|
@@ -0,0 +1,704 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Contains basic c-functions which usually contain performance critical code
|
| 6 |
+
Keeping this code separate from the beginning makes it easier to out-source
|
| 7 |
+
it into c later, if required"""
|
| 8 |
+
|
| 9 |
+
import zlib
|
| 10 |
+
from gitdb.util import byte_ord
|
| 11 |
+
decompressobj = zlib.decompressobj
|
| 12 |
+
|
| 13 |
+
import mmap
|
| 14 |
+
from itertools import islice
|
| 15 |
+
from functools import reduce
|
| 16 |
+
|
| 17 |
+
from gitdb.const import NULL_BYTE, BYTE_SPACE
|
| 18 |
+
from gitdb.utils.encoding import force_text
|
| 19 |
+
from gitdb.typ import (
|
| 20 |
+
str_blob_type,
|
| 21 |
+
str_commit_type,
|
| 22 |
+
str_tree_type,
|
| 23 |
+
str_tag_type,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
from io import StringIO
|
| 27 |
+
|
| 28 |
+
# INVARIANTS
# numeric pack object type ids of the two delta encodings
OFS_DELTA = 6
REF_DELTA = 7
delta_types = (OFS_DELTA, REF_DELTA)

# maps numeric pack object type ids to their canonical type names;
# delta ids map to marker strings, reserved ids map to empty bytes
type_id_to_type_map = {
    0: b'',  # EXT 1
    1: str_commit_type,
    2: str_tree_type,
    3: str_blob_type,
    4: str_tag_type,
    5: b'',  # EXT 2
    OFS_DELTA: "OFS_DELTA",  # OFFSET DELTA
    REF_DELTA: "REF_DELTA"  # REFERENCE DELTA
}

# inverse of the map above, for writing pack object headers
type_to_type_id_map = {
    str_commit_type: 1,
    str_tree_type: 2,
    str_blob_type: 3,
    str_tag_type: 4,
    "OFS_DELTA": OFS_DELTA,
    "REF_DELTA": REF_DELTA,
}

# used when dealing with larger streams
chunk_size = 1000 * mmap.PAGESIZE

__all__ = ('is_loose_object', 'loose_object_header_info', 'msb_size', 'pack_object_header_info',
           'write_object', 'loose_object_header', 'stream_copy', 'apply_delta_data',
           'is_equal_canonical_sha', 'connect_deltas', 'DeltaChunkList', 'create_pack_object_header')
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
#{ Structures
|
| 62 |
+
|
| 63 |
+
def _set_delta_rbound(d, size):
|
| 64 |
+
"""Truncate the given delta to the given size
|
| 65 |
+
:param size: size relative to our target offset, may not be 0, must be smaller or equal
|
| 66 |
+
to our size
|
| 67 |
+
:return: d"""
|
| 68 |
+
d.ts = size
|
| 69 |
+
|
| 70 |
+
# NOTE: data is truncated automatically when applying the delta
|
| 71 |
+
# MUST NOT DO THIS HERE
|
| 72 |
+
return d
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _move_delta_lbound(d, bytes):
|
| 76 |
+
"""Move the delta by the given amount of bytes, reducing its size so that its
|
| 77 |
+
right bound stays static
|
| 78 |
+
:param bytes: amount of bytes to move, must be smaller than delta size
|
| 79 |
+
:return: d"""
|
| 80 |
+
if bytes == 0:
|
| 81 |
+
return
|
| 82 |
+
|
| 83 |
+
d.to += bytes
|
| 84 |
+
d.so += bytes
|
| 85 |
+
d.ts -= bytes
|
| 86 |
+
if d.data is not None:
|
| 87 |
+
d.data = d.data[bytes:]
|
| 88 |
+
# END handle data
|
| 89 |
+
|
| 90 |
+
return d
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def delta_duplicate(src):
    """:return: a new DeltaChunk with the same bounds and (shared) data buffer as `src`"""
    return DeltaChunk(src.to, src.ts, src.so, src.data)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def delta_chunk_apply(dc, bbuf, write):
    """Materialize a single delta chunk into the target stream.

    :param dc: DeltaChunk-like object to apply
    :param bbuf: buffer providing source bytes for copy operations
    :param write: write method to call with the data to emit"""
    data = dc.data
    if data is None:
        # copy-chunk: forward a slice of the source buffer
        write(bbuf[dc.so:dc.so + dc.ts])
        return
    # END handle copy mode
    # add-chunk: emit own data, truncated to the chunk's target size.
    # Slicing copies, so only slice when truncation is actually needed.
    write(data[:dc.ts] if dc.ts < len(data) else data)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class DeltaChunk:

    """A single piece of a delta: it either inserts new data into the target
    buffer, or copies an existing region from a source buffer."""
    __slots__ = (
        'to',    # start offset in the target buffer in bytes
        'ts',    # size of this chunk in the target buffer in bytes
        'so',    # start offset in the source buffer in bytes, or None
        'data',  # bytes to be added to the target buffer,
                 # DeltaChunkList to use as base, or None
    )

    def __init__(self, to, ts, so, data):
        self.to = to
        self.ts = ts
        self.so = so
        self.data = data

    def __repr__(self):
        return f"DeltaChunk({self.to:d}, {self.ts:d}, {self.so}, {self.data or ''})"

    #{ Interface

    def rbound(self):
        """:return: one-past-the-end offset of this chunk in the target buffer"""
        return self.to + self.ts

    def has_data(self):
        """:return: True if the instance has data to add to the target stream"""
        return self.data is not None

    #} END interface
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def _closest_index(dcl, absofs):
|
| 150 |
+
""":return: index at which the given absofs should be inserted. The index points
|
| 151 |
+
to the DeltaChunk with a target buffer absofs that equals or is greater than
|
| 152 |
+
absofs.
|
| 153 |
+
**Note:** global method for performance only, it belongs to DeltaChunkList"""
|
| 154 |
+
lo = 0
|
| 155 |
+
hi = len(dcl)
|
| 156 |
+
while lo < hi:
|
| 157 |
+
mid = (lo + hi) / 2
|
| 158 |
+
dc = dcl[mid]
|
| 159 |
+
if dc.to > absofs:
|
| 160 |
+
hi = mid
|
| 161 |
+
elif dc.rbound() > absofs or dc.to == absofs:
|
| 162 |
+
return mid
|
| 163 |
+
else:
|
| 164 |
+
lo = mid + 1
|
| 165 |
+
# END handle bound
|
| 166 |
+
# END for each delta absofs
|
| 167 |
+
return len(dcl) - 1
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def delta_list_apply(dcl, bbuf, write):
    """Replay every chunk of the chain in order, producing the final result
    through the passed write function.

    :param dcl: iterable of DeltaChunks to apply
    :param bbuf: base buffer containing the base of all deltas contained in this
        list; it will only be used by chunks which do not have a base chain
    :param write: function taking a string of bytes to write to the output"""
    for chunk in dcl:
        delta_chunk_apply(chunk, bbuf, write)
    # END for each chunk
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def delta_list_slice(dcl, absofs, size, ndcl):
    """Append to ``ndcl`` the sub-section of ``dcl`` that covers the given
    absolute target offset with the given size in bytes. Boundary chunks are
    copied and trimmed so the result spans exactly the requested region.

    :param dcl: source DeltaChunkList
    :param absofs: absolute target-buffer offset at which the slice begins
    :param size: length of the slice in bytes
    :param ndcl: destination list receiving the (possibly trimmed) chunk copies
    :return: None"""
    index = _closest_index(dcl, absofs)     # first chunk touched by the slice
    chunk = dcl[index]
    num_chunks = len(dcl)
    append = ndcl.append                    # bound method, hoisted for the loop

    # the first chunk may begin before absofs - clip its left side
    if chunk.to != absofs:
        clipped = DeltaChunk(chunk.to, chunk.ts, chunk.so, chunk.data)
        _move_delta_lbound(clipped, absofs - chunk.to)
        clipped.ts = min(clipped.ts, size)
        append(clipped)
        size -= clipped.ts
        index += 1
    # END lbound overlap handling

    while index < num_chunks and size:
        chunk = dcl[index]
        if chunk.ts <= size:
            # chunk fits entirely within the remaining slice - plain copy
            append(DeltaChunk(chunk.to, chunk.ts, chunk.so, chunk.data))
            size -= chunk.ts
        else:
            # chunk extends past the slice end - keep only the first `size` bytes
            trimmed = DeltaChunk(chunk.to, chunk.ts, chunk.so, chunk.data)
            trimmed.ts = size
            append(trimmed)
            size -= trimmed.ts
            break
        # END handle size
        index += 1
    # END for each chunk
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class DeltaChunkList(list):

    """List with special functionality to deal with DeltaChunks.
    There are two types of lists we represent. The one was created bottom-up, working
    towards the latest delta, the other kind was created top-down, working from the
    latest delta down to the earliest ancestor. This attribute is queryable
    after all processing with is_reversed."""

    __slots__ = tuple()

    def rbound(self):
        """:return: rightmost extend in bytes, absolute"""
        if len(self) == 0:
            return 0
        return self[-1].rbound()

    def lbound(self):
        """:return: leftmost byte at which this chunklist starts"""
        if len(self) == 0:
            return 0
        return self[0].to

    def size(self):
        """:return: size of bytes as measured by our delta chunks"""
        return self.rbound() - self.lbound()

    def apply(self, bbuf, write):
        """Apply all chunks, writing the assembled target data via `write`.
        Only used by public clients, internally we only use the global routines
        for performance"""
        return delta_list_apply(self, bbuf, write)

    def compress(self):
        """Alter the list to reduce the amount of nodes. Currently we concatenate
        runs of consecutive add-chunks into a single chunk.
        :return: self"""
        slen = len(self)
        if slen < 2:
            return self
        i = 0

        first_data_index = None
        while i < slen:
            dc = self[i]
            i += 1
            if dc.data is None:
                if first_data_index is not None and i - 2 - first_data_index > 1:
                    # NOTE: chunk data are bytes, so accumulate them in a
                    # BytesIO - the former StringIO raised a TypeError on
                    # Python 3 when fed bytes
                    from io import BytesIO
                    nd = BytesIO()  # new data
                    so = self[first_data_index].to  # start offset in target buffer
                    for x in range(first_data_index, i - 1):
                        xdc = self[x]
                        # each add-chunk contributes its data truncated to its target size
                        nd.write(xdc.data[:xdc.ts])
                    # END collect data

                    del(self[first_data_index:i - 1])
                    buf = nd.getvalue()
                    self.insert(first_data_index, DeltaChunk(so, len(buf), 0, buf))

                    # the list shrank - recompute length and resume just past the merge
                    slen = len(self)
                    i = first_data_index + 1

                # END concatenate data
                first_data_index = None
                continue
            # END skip non-data chunks

            if first_data_index is None:
                first_data_index = i - 1
        # END iterate list

        return self

    def check_integrity(self, target_size=-1):
        """Verify the list has non-overlapping chunks only, and the total size matches
        target_size
        :param target_size: if not -1, the total size of the chain must be target_size
        :raise AssertionError: if the size doesn't match"""
        if target_size > -1:
            assert self[-1].rbound() == target_size
            assert reduce(lambda x, y: x + y, (d.ts for d in self), 0) == target_size
        # END target size verification

        if len(self) < 2:
            return

        # check data
        for dc in self:
            assert dc.ts > 0
            if dc.has_data():
                assert len(dc.data) >= dc.ts
        # END for each dc

        left = islice(self, 0, len(self) - 1)
        right = iter(self)
        # advance by one to pair each chunk with its successor;
        # 'right.next()' was Python-2-only and raises AttributeError on Python 3
        next(right)
        # this is very pythonic - we might have just use index based access here,
        # but this could actually be faster
        for lft, rgt in zip(left, right):
            # adjacent chunks must tile the target buffer without gaps or overlaps
            assert lft.rbound() == rgt.to
            assert lft.to + lft.ts == rgt.to
        # END for each pair
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
class TopdownDeltaChunkList(DeltaChunkList):

    """Represents a list which is generated by feeding its ancestor streams one by
    one"""
    __slots__ = tuple()

    def connect_with_next_base(self, bdcl):
        """Connect this chain with the next level of our base delta chunklist.
        The goal in this game is to mark as many of our chunks rigid, hence they
        cannot be changed by any of the upcoming bases anymore. Once all our
        chunks are marked like that, we can stop all processing
        :param bdcl: data chunk list being one of our bases. They must be fed in
            consecutively and in order, towards the earliest ancestor delta
        :return: True if processing was done. Use it to abort processing of
            remaining streams if False is returned"""
        nfc = 0  # number of frozen chunks
        dci = 0  # delta chunk index
        slen = len(self)  # len of self
        ccl = list()  # temporary list
        while dci < slen:
            dc = self[dci]
            dci += 1

            # all add-chunks which are already topmost don't need additional processing
            if dc.data is not None:
                nfc += 1
                continue
            # END skip add chunks

            # copy chunks
            # integrate the portion of the base list into ourselves. Lists
            # dont support efficient insertion ( just one at a time ), but for now
            # we live with it. Internally, its all just a 32/64bit pointer, and
            # the portions of moved memory should be smallish. Maybe we just rebuild
            # ourselves in order to reduce the amount of insertions ...
            # reuse the scratch list rather than allocating a fresh one per chunk
            del(ccl[:])
            # resolve our copy-chunk's source region against the base list
            delta_list_slice(bdcl, dc.so, dc.ts, ccl)

            # move the target bounds into place to match with our chunk
            ofs = dc.to - dc.so
            for cdc in ccl:
                cdc.to += ofs
            # END update target bounds

            if len(ccl) == 1:
                # the copy resolved to exactly one base chunk - replace in place
                self[dci - 1] = ccl[0]
            else:
                # maybe try to compute the expenses here, and pick the right algorithm
                # It would normally be faster than copying everything physically though
                # TODO: Use a deque here, and decide by the index whether to extend
                # or extend left !
                post_dci = self[dci:]
                del(self[dci - 1:])  # include deletion of dc
                self.extend(ccl)
                self.extend(post_dci)

                # list length changed: refresh the bound and skip past the insertion
                slen = len(self)
                dci += len(ccl) - 1  # deleted dc, added rest

            # END handle chunk replacement
        # END for each chunk

        # when every chunk was an add-chunk, nothing can change anymore:
        # signal the caller to stop feeding further bases
        if nfc == slen:
            return False
        # END handle completeness
        return True
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
#} END structures
|
| 391 |
+
|
| 392 |
+
#{ Routines
|
| 393 |
+
|
| 394 |
+
def is_loose_object(m):
    """
    :return: True if the file contained in memory map `m` appears to be a loose
        object. Only the first two bytes are needed
    :param m: bytes-like random access data (memory map, bytes, bytearray)"""
    # bytearray() yields ints for any bytes-like input; the former
    # map(ord, ...) raised a TypeError on Python 3, where indexing
    # bytes already produces ints
    b0, b1 = bytearray(m[:2])
    word = (b0 << 8) + b1
    # 0x78 is the common zlib header byte; per RFC 1950 the first two bytes of
    # a valid zlib stream, read as a big-endian number, are divisible by 31
    return b0 == 0x78 and (word % 31) == 0
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
def loose_object_header_info(m):
    """
    :return: tuple(type_string, uncompressed_size_in_bytes) - the object's type
        tag as well as its uncompressed size in bytes, parsed from the header
    :param m: memory map from which to read the compressed object data"""
    # 8192 bytes is far more than the short "<type> <size>\0" header needs;
    # the same amount is used in cgit
    hdr = decompressobj().decompress(m, 8192)
    # header layout: b"<type> <decimal size>\0<payload ...>"
    header = hdr[:hdr.find(NULL_BYTE)]
    type_name, size = header.split(BYTE_SPACE)
    return type_name, int(size)
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def pack_object_header_info(data):
    """
    :return: tuple(type_id, uncompressed_size_in_bytes, byte_offset)
        The type_id should be interpreted according to the ``type_id_to_type_map`` map
        The byte-offset specifies the start of the actual zlib compressed datastream
    :param data: random-access memory, like a string or memory map"""
    c = byte_ord(data[0])       # first header byte
    type_id = (c >> 4) & 7      # bits 4-6 hold the numeric object type
    size = c & 15               # low nibble seeds the size
    shift = 4                   # following bytes contribute 7 bits each
    i = 1                       # next byte to read
    while c & 0x80:             # high bit set -> another size byte follows
        c = byte_ord(data[i])
        i += 1
        size += (c & 0x7f) << shift
        shift += 7
    # END character loop
    return (type_id, size, i)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def create_pack_object_header(obj_type, obj_size):
    """
    :return: bytearray defining the pack header comprised of the object type
        and its uncompressed size in bytes

    :param obj_type: pack type_id of the object
    :param obj_size: uncompressed size in bytes of the following object stream"""
    header = bytearray()
    # first byte: type in bits 4-6, low nibble of the size in bits 0-3
    cur = (obj_type << 4) | (obj_size & 0xf)
    remaining = obj_size >> 4
    # continuation bytes carry 7 size bits each; the MSB flags "more to come"
    while remaining:
        header.append(cur | 0x80)
        cur = remaining & 0x7f
        remaining >>= 7
    # END until size is consumed
    header.append(cur)
    return header
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def msb_size(data, offset=0):
    """Decode a variable-length (MSB-continued) size field.

    :param data: random access bytes-like data
    :param offset: byte offset at which the size field starts
    :return: tuple(read_bytes, size) - the absolute offset just past the size
        field, and the decoded size
    :raise AssertionError: if the field is not terminated within `data`"""
    size = 0
    consumed = 0
    limit = len(data)
    while consumed < limit:
        byte = data[consumed + offset]
        # each byte contributes 7 bits, least-significant group first
        size |= (byte & 0x7f) << (consumed * 7)
        consumed += 1
        if not byte & 0x80:
            # high bit clear terminates the field
            return consumed + offset, size
    # END while in range
    raise AssertionError("Could not find terminating MSB byte in data stream")
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def loose_object_header(type, size):
    """
    :param type: type tag of the object (bytes or text)
    :param size: content size in bytes
    :return: bytes representing the loose object header, which is immediately
        followed by the content stream of size 'size'"""
    header = '%s %i\0' % (force_text(type), size)
    return header.encode('ascii')
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def write_object(type, size, read, write, chunk_size=chunk_size):
    """Write the object identified by type, size and the source stream into the
    target stream.

    :param type: type string of the object
    :param size: amount of bytes to write from source_stream
    :param read: read method of a stream providing the content data
    :param write: write method of the output stream; assumed to return the
        number of bytes written
    :param chunk_size: maximum number of bytes moved per copy step
    :return: the actual amount of bytes written to stream, including the header"""
    # header first: b"<type> <size>\0", then the raw content
    written = write(loose_object_header(type, size))
    written += stream_copy(read, write, size, chunk_size)
    return written
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
def stream_copy(read, write, size, chunk_size):
    """Copy a stream up to `size` bytes using the provided read and write
    methods, in chunks of at most `chunk_size` bytes.

    :return: number of bytes actually copied; may be less than `size` when the
        source is exhausted early

    **Note:** its much like stream_copy utility, but operates just using methods"""
    copied = 0  # num data bytes written

    # WRITE ALL DATA UP TO SIZE
    while True:
        want = min(chunk_size, size - copied)
        # NOTE: not every write method returns the number of bytes written
        # (mmap.write for instance), so progress is tracked by what was read
        chunk = read(want)
        got = len(chunk)
        copied += got
        write(chunk)
        # a short read means the source ran dry
        if got < want or copied == size:
            break
        # END check for stream end
    # END duplicate data
    return copied
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def connect_deltas(dstreams):
    """
    Read the condensed delta chunk information from dstream and merge its information
    into a list of existing delta chunks

    :param dstreams: iterable of delta stream objects, the delta to be applied last
        comes first, then all its ancestors in order
    :return: DeltaChunkList, containing all operations to apply"""
    tdcl = None  # topmost dcl

    dcl = tdcl = TopdownDeltaChunkList()
    for dsi, ds in enumerate(dstreams):
        db = ds.read()
        delta_buf_size = ds.size

        # read header: base size, then target size, both MSB-encoded
        i, base_size = msb_size(db)
        i, target_size = msb_size(db, i)

        # interpret opcodes
        tbw = 0  # amount of target bytes written
        while i < delta_buf_size:
            # NOTE: indexing bytes yields ints on Python 3 - the former
            # ord(db[i]) calls raised a TypeError here (the sibling
            # apply_delta_data already indexes directly)
            c = db[i]
            i += 1
            if c & 0x80:
                # copy operation: the low opcode bits flag which
                # little-endian offset/size bytes follow
                cp_off, cp_size = 0, 0
                if (c & 0x01):
                    cp_off = db[i]
                    i += 1
                if (c & 0x02):
                    cp_off |= (db[i] << 8)
                    i += 1
                if (c & 0x04):
                    cp_off |= (db[i] << 16)
                    i += 1
                if (c & 0x08):
                    cp_off |= (db[i] << 24)
                    i += 1
                if (c & 0x10):
                    cp_size = db[i]
                    i += 1
                if (c & 0x20):
                    cp_size |= (db[i] << 8)
                    i += 1
                if (c & 0x40):
                    cp_size |= (db[i] << 16)
                    i += 1

                if not cp_size:
                    # a size of zero encodes the 0x10000-byte maximum
                    cp_size = 0x10000

                rbound = cp_off + cp_size
                if (rbound < cp_size or
                        rbound > base_size):
                    # corrupted copy range reaching past the base - stop
                    break

                dcl.append(DeltaChunk(tbw, cp_size, cp_off, None))
                tbw += cp_size
            elif c:
                # add operation: the next c bytes are literal data
                # NOTE: in C, the data chunks should probably be concatenated here.
                # In python, we do it as a post-process
                dcl.append(DeltaChunk(tbw, c, 0, db[i:i + c]))
                i += c
                tbw += c
            else:
                raise ValueError("unexpected delta opcode 0")
            # END handle command byte
        # END while processing delta data

        dcl.compress()

        # merge the lists !
        if dsi > 0:
            if not tdcl.connect_with_next_base(dcl):
                break
        # END handle merge

        # prepare next base
        dcl = DeltaChunkList()
    # END for each delta stream

    return tdcl
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def apply_delta_data(src_buf, src_buf_size, delta_buf, delta_buf_size, write):
    """
    Apply data from a delta buffer using a source buffer to the target file

    :param src_buf: random access data from which the delta was created
    :param src_buf_size: size of the source buffer in bytes
    :param delta_buf: random access delta data
    :param delta_buf_size: size of the delta buffer in bytes
    :param write: write method taking a chunk of bytes
    :raise ValueError: on a zero opcode
    :raise AssertionError: if the delta could not be consumed completely

    **Note:** transcribed to python from the similar routine in patch-delta.c"""
    db = delta_buf
    pos = 0
    while pos < delta_buf_size:
        opcode = db[pos]
        pos += 1
        if opcode & 0x80:
            # copy operation: the low opcode bits flag which little-endian
            # offset/size bytes follow the opcode
            cp_off = 0
            cp_size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if opcode & bit:
                    cp_off |= db[pos] << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if opcode & bit:
                    cp_size |= db[pos] << shift
                    pos += 1

            if not cp_size:
                # a size of zero encodes the 0x10000-byte maximum
                cp_size = 0x10000

            rbound = cp_off + cp_size
            if rbound < cp_size or rbound > src_buf_size:
                # corrupted copy range - bail out, the final assert will fire
                break
            write(src_buf[cp_off:cp_off + cp_size])
        elif opcode:
            # add operation: the next `opcode` bytes are literal data
            write(db[pos:pos + opcode])
            pos += opcode
        else:
            raise ValueError("unexpected delta opcode 0")
        # END handle command byte
    # END while processing delta data

    # yes, lets use the exact same error message that git uses :)
    assert pos == delta_buf_size, "delta replay has gone wild"
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
def is_equal_canonical_sha(canonical_length, match, sha1):
    """
    :return: True if the given partial binary sha matches the 20-byte sha over
        its canonical length. For uneven canonical representations only the
        high nibble of the trailing byte takes part in the comparison
    :param canonical_length: length of the match in hex characters
    :param match: less than 20 byte sha
    :param sha1: 20 byte sha"""
    full_bytes = canonical_length // 2
    # whole bytes must match exactly
    if match[:full_bytes] != sha1[:full_bytes]:
        return False

    # uneven length: XOR the trailing bytes and test only the high nibble
    if canonical_length - full_bytes and \
            (byte_ord(match[-1]) ^ byte_ord(sha1[len(match) - 1])) & 0xf0:
        return False
    # END handle uneven canonnical length
    return True
|
| 697 |
+
|
| 698 |
+
#} END routines
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
try:
|
| 702 |
+
from gitdb_speedups._perf import connect_deltas
|
| 703 |
+
except ImportError:
|
| 704 |
+
pass
|
vllm/lib/python3.10/site-packages/gitdb/pack.py
ADDED
|
@@ -0,0 +1,1031 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Contains PackIndexFile and PackFile implementations"""
|
| 6 |
+
import zlib
|
| 7 |
+
|
| 8 |
+
from gitdb.exc import (
|
| 9 |
+
BadObject,
|
| 10 |
+
AmbiguousObjectName,
|
| 11 |
+
UnsupportedOperation,
|
| 12 |
+
ParseError
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from gitdb.util import (
|
| 16 |
+
mman,
|
| 17 |
+
LazyMixin,
|
| 18 |
+
unpack_from,
|
| 19 |
+
bin_to_hex,
|
| 20 |
+
byte_ord,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from gitdb.fun import (
|
| 24 |
+
create_pack_object_header,
|
| 25 |
+
pack_object_header_info,
|
| 26 |
+
is_equal_canonical_sha,
|
| 27 |
+
type_id_to_type_map,
|
| 28 |
+
write_object,
|
| 29 |
+
stream_copy,
|
| 30 |
+
chunk_size,
|
| 31 |
+
delta_types,
|
| 32 |
+
OFS_DELTA,
|
| 33 |
+
REF_DELTA,
|
| 34 |
+
msb_size
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
try:
|
| 38 |
+
from gitdb_speedups._perf import PackIndexFile_sha_to_index
|
| 39 |
+
except ImportError:
|
| 40 |
+
pass
|
| 41 |
+
# END try c module
|
| 42 |
+
|
| 43 |
+
from gitdb.base import ( # Amazing !
|
| 44 |
+
OInfo,
|
| 45 |
+
OStream,
|
| 46 |
+
OPackInfo,
|
| 47 |
+
OPackStream,
|
| 48 |
+
ODeltaStream,
|
| 49 |
+
ODeltaPackInfo,
|
| 50 |
+
ODeltaPackStream,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
from gitdb.stream import (
|
| 54 |
+
DecompressMemMapReader,
|
| 55 |
+
DeltaApplyReader,
|
| 56 |
+
Sha1Writer,
|
| 57 |
+
NullStream,
|
| 58 |
+
FlexibleSha1Writer
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
from struct import pack
|
| 62 |
+
from binascii import crc32
|
| 63 |
+
|
| 64 |
+
from gitdb.const import NULL_BYTE
|
| 65 |
+
|
| 66 |
+
import tempfile
|
| 67 |
+
import array
|
| 68 |
+
import os
|
| 69 |
+
import sys
|
| 70 |
+
|
| 71 |
+
__all__ = ('PackIndexFile', 'PackFile', 'PackEntity')
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
#{ Utilities
|
| 75 |
+
|
| 76 |
+
def pack_object_at(cursor, offset, as_stream):
    """
    :return: Tuple(abs_data_offset, PackInfo|PackStream)
        an object of the correct type according to the type_id of the object.
        If as_stream is True, the object will contain a stream, allowing the
        data to be read decompressed.
    :param cursor: cursor providing random access to the pack data
    :param offset: offset into the data at which the object information is located
    :param as_stream: if True, a stream object will be returned that can read
        the data, otherwise you receive an info object only"""
    data = cursor.use_region(offset).buffer()
    type_id, uncomp_size, data_rela_offset = pack_object_header_info(data)

    delta_info = None
    if type_id == OFS_DELTA:
        # decode the variable-length, big-endian base-offset that follows the header
        pos = data_rela_offset
        byte = byte_ord(data[pos])
        pos += 1
        base_offset = byte & 0x7f
        while byte & 0x80:
            byte = byte_ord(data[pos])
            pos += 1
            base_offset += 1
            base_offset = (base_offset << 7) + (byte & 0x7f)
        # END character loop
        delta_info = base_offset
        total_rela_offset = pos
    elif type_id == REF_DELTA:
        # a 20 byte binary sha of the base object precedes the delta data
        total_rela_offset = data_rela_offset + 20
        delta_info = data[data_rela_offset:total_rela_offset]
    else:
        # undeltified base object - the data follows the header directly
        total_rela_offset = data_rela_offset
    # END handle type id

    abs_data_offset = offset + total_rela_offset
    if not as_stream:
        if delta_info is None:
            return abs_data_offset, OPackInfo(offset, type_id, uncomp_size)
        return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info)
    # END info-only path

    stream = DecompressMemMapReader(data[total_rela_offset:], False, uncomp_size)
    if delta_info is None:
        return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream)
    return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def write_stream_to_pack(read, write, zstream, base_crc=None, chunk_size=None):
    """Copy a stream as read from read function, zip it, and write the result.
    Count the number of written bytes and return it

    :param read: callable reading up to the requested number of bytes
    :param write: callable consuming the compressed bytes
    :param zstream: zlib compression object used to deflate the copied data
    :param base_crc: if not None, the crc will be the base for all compressed data
        we consecutively write and generate a crc32 from. If None, no crc will be generated
    :param chunk_size: number of bytes to read per iteration. Defaults to the
        module-wide ``gitdb.fun.chunk_size`` when None (backward compatible)
    :return: tuple(no bytes read, no bytes written, crc32) crc might be 0 if base_crc
        was None"""
    if chunk_size is None:
        # fall back to the module-wide default read granularity
        from gitdb.fun import chunk_size
    # END handle default chunk size

    br = 0  # bytes read
    bw = 0  # bytes written
    want_crc = base_crc is not None
    crc = base_crc if want_crc else 0

    while True:
        chunk = read(chunk_size)
        br += len(chunk)
        compressed = zstream.compress(chunk)
        bw += len(compressed)
        write(compressed)  # cannot assume return value

        if want_crc:
            crc = crc32(compressed, crc)
        # END handle crc

        # a short read signals the end of the input stream
        if len(chunk) != chunk_size:
            break
    # END copy loop

    # flush whatever the compressor still buffers
    compressed = zstream.flush()
    bw += len(compressed)
    write(compressed)
    if want_crc:
        crc = crc32(compressed, crc)
    # END handle crc

    return (br, bw, crc)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
#} END utilities
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class IndexWriter:

    """Caches index entries and serializes them in one go to a given stream.

    **Note:** currently only writes v2 indices"""
    __slots__ = '_objs'

    def __init__(self):
        self._objs = []

    def append(self, binsha, crc, offset):
        """Record one (binsha, crc, offset) entry for later serialization"""
        self._objs.append((binsha, crc, offset))

    def write(self, pack_sha, write):
        """Write the index file using the given write method
        :param pack_sha: binary sha over the whole pack that we index
        :return: sha1 binary sha over all index file contents"""
        # entries must be ordered by sha for the fanout table and bisection
        self._objs.sort(key=lambda entry: entry[0])

        sha_writer = FlexibleSha1Writer(write)
        sha_write = sha_writer.write
        sha_write(PackIndexFile.index_v2_signature)
        sha_write(pack(">L", PackIndexFile.index_version_default))

        # fanout: cumulative count of shas per leading byte
        fanout = [0] * 256
        for entry in self._objs:
            fanout[byte_ord(entry[0][0])] += 1
        # END prepare fanout
        for i in range(255):
            count = fanout[i]
            sha_write(pack('>L', count))
            fanout[i + 1] += count
        # END write each fanout entry
        sha_write(pack('>L', fanout[255]))

        # all shas in sorted order - joined once to keep the call count low
        sha_write(b''.join(entry[0] for entry in self._objs))

        # crc32 of each object's compressed pack data
        for entry in self._objs:
            sha_write(pack('>L', entry[1] & 0xffffffff))
        # END for each crc

        # 32 bit offsets; large ones are redirected into the 64 bit table
        large_offsets = []
        for entry in self._objs:
            ofs = entry[2]
            if ofs > 0x7fffffff:
                large_offsets.append(ofs)
                # high bit set marks an index into the 64 bit offset table
                ofs = 0x80000000 + len(large_offsets) - 1
            # END handle 64 bit offsets
            sha_write(pack('>L', ofs & 0xffffffff))
        # END for each offset

        # 64 bit offset table
        for ofs in large_offsets:
            sha_write(pack(">Q", ofs))
        # END for each offset

        # trailer: checksum of the indexed pack, then the index' own sha
        assert(len(pack_sha) == 20)
        sha_write(pack_sha)
        sha = sha_writer.sha(as_hex=False)
        write(sha)
        return sha
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class PackIndexFile(LazyMixin):

    """A pack index provides offsets into the corresponding pack, allowing to find
    locations for offsets faster."""

    # Dont use slots as we dynamically bind functions for each version, need a dict for this
    # The slots you see here are just to keep track of our instance variables
    # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version',
    #               '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset')

    # used in v2 indices
    _sha_list_offset = 8 + 1024
    index_v2_signature = b'\xfftOc'
    index_version_default = 2

    def __init__(self, indexpath):
        super().__init__()
        self._indexpath = indexpath

    def close(self):
        # drop memory-map handles (required on Windows before the file can be removed)
        mman.force_map_handle_removal_win(self._indexpath)
        self._cursor = None

    def _set_cache_(self, attr):
        """Lazily initialize the attribute ``attr`` (LazyMixin hook)."""
        if attr == "_packfile_checksum":
            # second-to-last 20 bytes of the index are the pack's checksum
            self._packfile_checksum = self._cursor.map()[-40:-20]
        elif attr == "_indexfile_checksum":
            # BUGFIX: this branch previously tested "_packfile_checksum" a second
            # time, making it unreachable and leaving _indexfile_checksum unset.
            self._indexfile_checksum = self._cursor.map()[-20:]
        elif attr == "_cursor":
            # Note: We don't lock the file when reading as we cannot be sure
            # that we can actually write to the location - it could be a read-only
            # alternate for instance
            self._cursor = mman.make_cursor(self._indexpath).use_region()
            # We will assume that the index will always fully fit into memory !
            if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size():
                raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % (
                    self._indexpath, self._cursor.file_size(), mman.window_size()))
            # END assert window size
        else:
            # now its time to initialize everything - if we are here, someone wants
            # to access the fanout table or related properties

            # CHECK VERSION
            mmap = self._cursor.map()
            self._version = (mmap[:4] == self.index_v2_signature and 2) or 1
            if self._version == 2:
                version_id = unpack_from(">L", mmap, 4)[0]
                assert version_id == self._version, "Unsupported index version: %i" % version_id
            # END assert version

            # SETUP FUNCTIONS
            # setup our functions according to the actual version
            for fname in ('entry', 'offset', 'sha', 'crc'):
                setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version)))
            # END for each function to initialize

            # INITIALIZE DATA
            # byte offset is 8 if version is 2, 0 otherwise
            self._initialize()
        # END handle attributes

    #{ Access V1

    def _entry_v1(self, i):
        """:return: tuple(offset, binsha, 0) - v1 indices carry no crc"""
        return unpack_from(">L20s", self._cursor.map(), 1024 + i * 24) + (0, )

    def _offset_v1(self, i):
        """see ``_offset_v2``"""
        return unpack_from(">L", self._cursor.map(), 1024 + i * 24)[0]

    def _sha_v1(self, i):
        """see ``_sha_v2``"""
        base = 1024 + (i * 24) + 4
        return self._cursor.map()[base:base + 20]

    def _crc_v1(self, i):
        """unsupported - v1 indices store no crc information"""
        return 0

    #} END access V1

    #{ Access V2
    def _entry_v2(self, i):
        """:return: tuple(offset, binsha, crc)"""
        return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i))

    def _offset_v2(self, i):
        """:return: 32 or 64 byte offset into pack files. 64 byte offsets will only
            be returned if the pack is larger than 4 GiB, or 2^32"""
        offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0]

        # if the high-bit is set, this indicates that we have to lookup the offset
        # in the 64 bit region of the file. The current offset ( lower 31 bits )
        # are the index into it
        if offset & 0x80000000:
            offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0]
        # END handle 64 bit offset

        return offset

    def _sha_v2(self, i):
        """:return: sha at the given index of this file index instance"""
        base = self._sha_list_offset + i * 20
        return self._cursor.map()[base:base + 20]

    def _crc_v2(self, i):
        """:return: 4 bytes crc for the object at index i"""
        return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0]

    #} END access V2

    #{ Initialization

    def _initialize(self):
        """initialize base data: fanout table and, for v2, the section offsets"""
        self._fanout_table = self._read_fanout((self._version == 2) * 8)

        if self._version == 2:
            self._crc_list_offset = self._sha_list_offset + self.size() * 20
            self._pack_offset = self._crc_list_offset + self.size() * 4
            self._pack_64_offset = self._pack_offset + self.size() * 4
        # END setup base

    def _read_fanout(self, byte_offset):
        """Generate a fanout table from our data: 256 cumulative counts of shas
        per leading byte, starting at ``byte_offset``"""
        d = self._cursor.map()
        out = list()
        append = out.append
        for i in range(256):
            append(unpack_from('>L', d, byte_offset + i * 4)[0])
        # END for each entry
        return out

    #} END initialization

    #{ Properties
    def version(self):
        """:return: the version of this index file (1 or 2)"""
        return self._version

    def size(self):
        """:return: amount of objects referred to by this index"""
        return self._fanout_table[255]

    def path(self):
        """:return: path to the packindexfile"""
        return self._indexpath

    def packfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of the pack file"""
        return self._cursor.map()[-40:-20]

    def indexfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of this index file"""
        return self._cursor.map()[-20:]

    def offsets(self):
        """:return: sequence of all offsets in the order in which they were written

        **Note:** return value can be random accessed, but may be immutable"""
        if self._version == 2:
            # read stream to array, convert to tuple
            a = array.array('I')    # 4 byte unsigned int, long are 8 byte on 64 bit it appears
            a.frombytes(self._cursor.map()[self._pack_offset:self._pack_64_offset])

            # networkbyteorder to something array likes more
            if sys.byteorder == 'little':
                a.byteswap()
            return a
        else:
            return tuple(self.offset(index) for index in range(self.size()))
        # END handle version

    def sha_to_index(self, sha):
        """
        :return: index usable with the ``offset`` or ``entry`` method, or None
            if the sha was not found in this pack index
        :param sha: 20 byte sha to lookup"""
        first_byte = byte_ord(sha[0])
        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # bisect until we have the sha
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if sha < mid_sha:
                hi = mid
            elif sha == mid_sha:
                return mid
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect
        return None

    def partial_sha_to_index(self, partial_bin_sha, canonical_length):
        """
        :return: index as in `sha_to_index` or None if the sha was not found in this
            index file
        :param partial_bin_sha: an at least two bytes of a partial binary sha as bytes
        :param canonical_length: length of the original hexadecimal representation of the
            given partial binary sha
        :raise AmbiguousObjectName:"""
        if len(partial_bin_sha) < 2:
            raise ValueError("Require at least 2 bytes of partial sha")

        assert isinstance(partial_bin_sha, bytes), "partial_bin_sha must be bytes"
        first_byte = byte_ord(partial_bin_sha[0])

        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # fill the partial to full 20 bytes
        filled_sha = partial_bin_sha + NULL_BYTE * (20 - len(partial_bin_sha))

        # find lowest
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if filled_sha < mid_sha:
                hi = mid
            elif filled_sha == mid_sha:
                # perfect match
                lo = mid
                break
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect

        if lo < self.size():
            cur_sha = get_sha(lo)
            if is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha):
                next_sha = None
                if lo + 1 < self.size():
                    next_sha = get_sha(lo + 1)
                # NOTE(review): adjacent shas in the index are unique, so this
                # equality test seemingly never fires - the ambiguity check may
                # need to compare the partial prefix against next_sha instead.
                # Behavior left unchanged pending confirmation.
                if next_sha and next_sha == cur_sha:
                    raise AmbiguousObjectName(partial_bin_sha)
                return lo
            # END if we have a match
        # END if we found something
        return None

    if 'PackIndexFile_sha_to_index' in globals():
        # NOTE: Its just about 25% faster, the major bottleneck might be the attr
        # accesses
        def sha_to_index(self, sha):
            return PackIndexFile_sha_to_index(self, sha)
    # END redefine heavy-hitter with c version

    #} END properties
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
class PackFile(LazyMixin):

    """A pack is a file written according to the Version 2 for git packs

    As we currently use memory maps, it could be assumed that the maximum size of
    packs therefore is 32 bit on 32 bit systems. On 64 bit systems, this should be
    fine though.

    **Note:** at some point, this might be implemented using streams as well, or
    streams are an alternate path in the case memory maps cannot be created
    for some reason - one clearly doesn't want to read 10GB at once in that
    case"""

    __slots__ = ('_packpath', '_cursor', '_size', '_version')
    pack_signature = 0x5041434b        # 'PACK'
    pack_version_default = 2

    # offset into our data at which the first object starts
    first_object_offset = 3 * 4       # header bytes
    footer_size = 20                  # final sha

    def __init__(self, packpath):
        # only remember the path - mapping happens lazily on first access
        self._packpath = packpath

    def close(self):
        # drop memory-map handles (required on Windows before the file can be removed)
        mman.force_map_handle_removal_win(self._packpath)
        self._cursor = None

    def _set_cache_(self, attr):
        # whichever attribute is queried first fills the whole cache
        self._cursor = mman.make_cursor(self._packpath).use_region()

        # parse the 12 byte header: signature, version, object count
        signature, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0)

        # TODO: figure out whether we should better keep the lock, or maybe
        # add a .keep file instead ?
        if signature != self.pack_signature:
            raise ParseError("Invalid pack signature: %i" % signature)

    def _iter_objects(self, start_offset, as_stream=True):
        """Handle the actual iteration of objects within this pack"""
        reader = self._cursor
        end_of_content = reader.file_size() - self.footer_size
        position = start_offset or self.first_object_offset

        sink = NullStream()
        while position < end_of_content:
            data_offset, ostream = pack_object_at(reader, position, True)
            # scrub the stream to the end - this decompresses the object, but yields
            # the amount of compressed bytes we need to get to the next offset
            stream_copy(ostream.read, sink.write, ostream.size, chunk_size)
            assert ostream.stream._br == ostream.size
            position += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read()

            # if a stream is requested, reset it beforehand
            # Otherwise return the Stream object directly, its derived from the
            # info object
            if as_stream:
                ostream.stream.seek(0)
            yield ostream
        # END until we have read everything

    #{ Pack Information

    def size(self):
        """:return: The amount of objects stored in this pack"""
        return self._size

    def version(self):
        """:return: the version of this pack"""
        return self._version

    def data(self):
        """
        :return: read-only data of this pack. It provides random access and usually
            is a memory map.
        :note: This method is unsafe as it returns a window into a file which might be larger than the actual window size"""
        # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
        return self._cursor.use_region().map()

    def checksum(self):
        """:return: 20 byte sha1 hash on all object sha's contained in this file"""
        return self._cursor.use_region(self._cursor.file_size() - 20).buffer()[:]

    def path(self):
        """:return: path to the packfile"""
        return self._packpath
    #} END pack information

    #{ Pack Specific

    def collect_streams(self, offset):
        """
        :return: list of pack streams which are required to build the object
            at the given offset. The first entry of the list is the object at offset,
            the last one is either a full object, or a REF_Delta stream. The latter
            type needs its reference object to be locked up in an ODB to form a valid
            delta chain.
            If the object at offset is no delta, the size of the list is 1.
        :param offset: specifies the first byte of the object within this pack"""
        chain = []
        reader = self._cursor
        while True:
            ostream = pack_object_at(reader, offset, True)[1]
            chain.append(ostream)
            if ostream.type_id != OFS_DELTA:
                # the only thing we can lookup are OFFSET deltas. Everything
                # else is either an object, or a ref delta, in the latter
                # case someone else has to find it
                break
            # END handle type
            offset = ostream.pack_offset - ostream.delta_info
        # END while chaining streams
        return chain

    #} END pack specific

    #{ Read-Database like Interface

    def info(self, offset):
        """Retrieve information about the object at the given file-absolute offset

        :param offset: byte offset
        :return: OPackInfo instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1]

    def stream(self, offset):
        """Retrieve an object at the given file-relative offset as stream along with its information

        :param offset: byte offset
        :return: OPackStream instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1]

    def stream_iter(self, start_offset=0):
        """
        :return: iterator yielding OPackStream compatible instances, allowing
            to access the data in the pack directly.
        :param start_offset: offset to the first object to iterate. If 0, iteration
            starts at the very first object in the pack.

        **Note:** Iterating a pack directly is costly as the datastream has to be decompressed
        to determine the bounds between the objects"""
        return self._iter_objects(start_offset, as_stream=True)

    #} END Read-Database like Interface
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
class PackEntity(LazyMixin):
|
| 655 |
+
|
| 656 |
+
"""Combines the PackIndexFile and the PackFile into one, allowing the
|
| 657 |
+
actual objects to be resolved and iterated"""
|
| 658 |
+
|
| 659 |
+
__slots__ = ('_index', # our index file
|
| 660 |
+
'_pack', # our pack file
|
| 661 |
+
'_offset_map' # on demand dict mapping one offset to the next consecutive one
|
| 662 |
+
)
|
| 663 |
+
|
| 664 |
+
IndexFileCls = PackIndexFile
|
| 665 |
+
PackFileCls = PackFile
|
| 666 |
+
|
| 667 |
+
def __init__(self, pack_or_index_path):
|
| 668 |
+
"""Initialize ourselves with the path to the respective pack or index file"""
|
| 669 |
+
basename, ext = os.path.splitext(pack_or_index_path)
|
| 670 |
+
self._index = self.IndexFileCls("%s.idx" % basename) # PackIndexFile instance
|
| 671 |
+
self._pack = self.PackFileCls("%s.pack" % basename) # corresponding PackFile instance
|
| 672 |
+
|
| 673 |
+
    def close(self):
        # Release both underlying files; each close() unmaps its
        # memory-mapped regions via mman so the files can be deleted.
        self._index.close()
        self._pack.close()
|
| 676 |
+
|
| 677 |
+
def _set_cache_(self, attr):
|
| 678 |
+
# currently this can only be _offset_map
|
| 679 |
+
# TODO: make this a simple sorted offset array which can be bisected
|
| 680 |
+
# to find the respective entry, from which we can take a +1 easily
|
| 681 |
+
# This might be slower, but should also be much lighter in memory !
|
| 682 |
+
offsets_sorted = sorted(self._index.offsets())
|
| 683 |
+
last_offset = len(self._pack.data()) - self._pack.footer_size
|
| 684 |
+
assert offsets_sorted, "Cannot handle empty indices"
|
| 685 |
+
|
| 686 |
+
offset_map = None
|
| 687 |
+
if len(offsets_sorted) == 1:
|
| 688 |
+
offset_map = {offsets_sorted[0]: last_offset}
|
| 689 |
+
else:
|
| 690 |
+
iter_offsets = iter(offsets_sorted)
|
| 691 |
+
iter_offsets_plus_one = iter(offsets_sorted)
|
| 692 |
+
next(iter_offsets_plus_one)
|
| 693 |
+
consecutive = zip(iter_offsets, iter_offsets_plus_one)
|
| 694 |
+
|
| 695 |
+
offset_map = dict(consecutive)
|
| 696 |
+
|
| 697 |
+
# the last offset is not yet set
|
| 698 |
+
offset_map[offsets_sorted[-1]] = last_offset
|
| 699 |
+
# END handle offset amount
|
| 700 |
+
self._offset_map = offset_map
|
| 701 |
+
|
| 702 |
+
def _sha_to_index(self, sha):
|
| 703 |
+
""":return: index for the given sha, or raise"""
|
| 704 |
+
index = self._index.sha_to_index(sha)
|
| 705 |
+
if index is None:
|
| 706 |
+
raise BadObject(sha)
|
| 707 |
+
return index
|
| 708 |
+
|
| 709 |
+
def _iter_objects(self, as_stream):
|
| 710 |
+
"""Iterate over all objects in our index and yield their OInfo or OStream instences"""
|
| 711 |
+
_sha = self._index.sha
|
| 712 |
+
_object = self._object
|
| 713 |
+
for index in range(self._index.size()):
|
| 714 |
+
yield _object(_sha(index), as_stream, index)
|
| 715 |
+
# END for each index
|
| 716 |
+
|
| 717 |
+
def _object(self, sha, as_stream, index=-1):
|
| 718 |
+
""":return: OInfo or OStream object providing information about the given sha
|
| 719 |
+
:param index: if not -1, its assumed to be the sha's index in the IndexFile"""
|
| 720 |
+
# its a little bit redundant here, but it needs to be efficient
|
| 721 |
+
if index < 0:
|
| 722 |
+
index = self._sha_to_index(sha)
|
| 723 |
+
if sha is None:
|
| 724 |
+
sha = self._index.sha(index)
|
| 725 |
+
# END assure sha is present ( in output )
|
| 726 |
+
offset = self._index.offset(index)
|
| 727 |
+
type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
|
| 728 |
+
if as_stream:
|
| 729 |
+
if type_id not in delta_types:
|
| 730 |
+
packstream = self._pack.stream(offset)
|
| 731 |
+
return OStream(sha, packstream.type, packstream.size, packstream.stream)
|
| 732 |
+
# END handle non-deltas
|
| 733 |
+
|
| 734 |
+
# produce a delta stream containing all info
|
| 735 |
+
# To prevent it from applying the deltas when querying the size,
|
| 736 |
+
# we extract it from the delta stream ourselves
|
| 737 |
+
streams = self.collect_streams_at_offset(offset)
|
| 738 |
+
dstream = DeltaApplyReader.new(streams)
|
| 739 |
+
|
| 740 |
+
return ODeltaStream(sha, dstream.type, None, dstream)
|
| 741 |
+
else:
|
| 742 |
+
if type_id not in delta_types:
|
| 743 |
+
return OInfo(sha, type_id_to_type_map[type_id], uncomp_size)
|
| 744 |
+
# END handle non-deltas
|
| 745 |
+
|
| 746 |
+
# deltas are a little tougher - unpack the first bytes to obtain
|
| 747 |
+
# the actual target size, as opposed to the size of the delta data
|
| 748 |
+
streams = self.collect_streams_at_offset(offset)
|
| 749 |
+
buf = streams[0].read(512)
|
| 750 |
+
offset, src_size = msb_size(buf)
|
| 751 |
+
offset, target_size = msb_size(buf, offset)
|
| 752 |
+
|
| 753 |
+
# collect the streams to obtain the actual object type
|
| 754 |
+
if streams[-1].type_id in delta_types:
|
| 755 |
+
raise BadObject(sha, "Could not resolve delta object")
|
| 756 |
+
return OInfo(sha, streams[-1].type, target_size)
|
| 757 |
+
# END handle stream
|
| 758 |
+
|
| 759 |
+
#{ Read-Database like Interface
|
| 760 |
+
|
| 761 |
+
def info(self, sha):
|
| 762 |
+
"""Retrieve information about the object identified by the given sha
|
| 763 |
+
|
| 764 |
+
:param sha: 20 byte sha1
|
| 765 |
+
:raise BadObject:
|
| 766 |
+
:return: OInfo instance, with 20 byte sha"""
|
| 767 |
+
return self._object(sha, False)
|
| 768 |
+
|
| 769 |
+
def stream(self, sha):
|
| 770 |
+
"""Retrieve an object stream along with its information as identified by the given sha
|
| 771 |
+
|
| 772 |
+
:param sha: 20 byte sha1
|
| 773 |
+
:raise BadObject:
|
| 774 |
+
:return: OStream instance, with 20 byte sha"""
|
| 775 |
+
return self._object(sha, True)
|
| 776 |
+
|
| 777 |
+
def info_at_index(self, index):
|
| 778 |
+
"""As ``info``, but uses a PackIndexFile compatible index to refer to the object"""
|
| 779 |
+
return self._object(None, False, index)
|
| 780 |
+
|
| 781 |
+
def stream_at_index(self, index):
|
| 782 |
+
"""As ``stream``, but uses a PackIndexFile compatible index to refer to the
|
| 783 |
+
object"""
|
| 784 |
+
return self._object(None, True, index)
|
| 785 |
+
|
| 786 |
+
#} END Read-Database like Interface
|
| 787 |
+
|
| 788 |
+
#{ Interface
|
| 789 |
+
|
| 790 |
+
def pack(self):
|
| 791 |
+
""":return: the underlying pack file instance"""
|
| 792 |
+
return self._pack
|
| 793 |
+
|
| 794 |
+
def index(self):
|
| 795 |
+
""":return: the underlying pack index file instance"""
|
| 796 |
+
return self._index
|
| 797 |
+
|
| 798 |
+
def is_valid_stream(self, sha, use_crc=False):
    """
    Verify that the stream at the given sha is valid.

    :param use_crc: if True, the index' crc is run over the compressed stream of
        the object, which is much faster than checking the sha1. It is also
        more prone to unnoticed corruption or manipulation.
    :param sha: 20 byte sha1 of the object whose stream to verify
        whether the compressed stream of the object is valid. If it is
        a delta, this only verifies that the delta's data is valid, not the
        data of the actual undeltified object, as it depends on more than
        just this stream.
        If False, the object will be decompressed and the sha generated. It must
        match the given sha

    :return: True if the stream is valid
    :raise UnsupportedOperation: If the index is version 1 only
    :raise BadObject: sha was not found"""
    if use_crc:
        if self._index.version() < 2:
            raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead")
        # END handle index version

        index = self._sha_to_index(sha)
        offset = self._index.offset(index)
        next_offset = self._offset_map[offset]
        crc_value = self._index.crc(index)

        # create the current crc value, on the compressed object data.
        # Read it in chunks, without copying the whole stream at once.
        crc_update = zlib.crc32
        pack_data = self._pack.data()
        cur_pos = offset
        this_crc_value = 0
        while cur_pos < next_offset:
            rbound = min(cur_pos + chunk_size, next_offset)
            size = rbound - cur_pos
            this_crc_value = crc_update(pack_data[cur_pos:cur_pos + size], this_crc_value)
            cur_pos += size
        # END window size loop

        # crc returns signed 32 bit numbers, the AND op forces it into unsigned
        # mode ... wow, sneaky, from dulwich.
        return (this_crc_value & 0xffffffff) == crc_value
    else:
        shawriter = Sha1Writer()
        stream = self._object(sha, as_stream=True)
        # write a loose object, which is the basis for the sha
        write_object(stream.type, stream.size, stream.read, shawriter.write)

        # NOTE: a prior version additionally asserted equality here, which
        # raised AssertionError on corruption (and was silently stripped
        # under `python -O`) instead of returning False as documented.
        # Compute the sha once and return the comparison result.
        return shawriter.sha(as_hex=False) == sha
    # END handle crc/sha verification
|
| 852 |
+
|
| 853 |
+
def info_iter(self):
    """
    :return: Iterator over all objects in this pack, yielding
        OInfo instances"""
    want_streams = False
    return self._iter_objects(as_stream=want_streams)
|
| 858 |
+
|
| 859 |
+
def stream_iter(self):
    """
    :return: Iterator over all objects in this pack, yielding
        OStream instances"""
    want_streams = True
    return self._iter_objects(as_stream=want_streams)
|
| 864 |
+
|
| 865 |
+
def collect_streams_at_offset(self, offset):
    """
    As the version in the PackFile, but can resolve REF deltas within this pack
    For more info, see ``collect_streams``

    :param offset: offset into the pack file at which the object can be found"""
    streams = self._pack.collect_streams(offset)

    # try to resolve the last one if needed. It is assumed to be either
    # a REF delta, or a base object, as OFFSET deltas are resolved by the pack
    if streams[-1].type_id == REF_DELTA:
        stream = streams[-1]
        # follow the delta chain until we reach a non-delta base, or until
        # a referenced sha cannot be found in this pack's index
        while stream.type_id in delta_types:
            if stream.type_id == REF_DELTA:
                # smmap can return memory view objects, which can't be compared as buffers/bytes can ...
                if isinstance(stream.delta_info, memoryview):
                    sindex = self._index.sha_to_index(stream.delta_info.tobytes())
                else:
                    sindex = self._index.sha_to_index(stream.delta_info)
                if sindex is None:
                    # referenced base lives outside this pack - leave the
                    # REF delta unresolved for the caller to handle
                    break
                stream = self._pack.stream(self._index.offset(sindex))
                streams.append(stream)
            else:
                # must be another OFS DELTA - this could happen if a REF
                # delta we resolve previously points to an OFS delta. Who
                # would do that ;) ? We can handle it though
                stream = self._pack.stream(stream.delta_info)
                streams.append(stream)
            # END handle ref delta
        # END resolve ref streams
    # END resolve streams

    return streams
|
| 899 |
+
|
| 900 |
+
def collect_streams(self, sha):
    """
    As ``PackFile.collect_streams``, but takes a sha instead of an offset.
    Additionally, ref_delta streams will be resolved within this pack.
    If this is not possible, the stream will be left alone, hence it is advised
    to check for unresolved ref-deltas and resolve them before attempting to
    construct a delta stream.

    :param sha: 20 byte sha1 specifying the object whose related streams you want to collect
    :return: list of streams, first being the actual object delta, the last being
        a possibly unresolved base object.
    :raise BadObject:"""
    # translate the sha into a pack offset, then defer to the offset variant
    entry = self._sha_to_index(sha)
    pack_offset = self._index.offset(entry)
    return self.collect_streams_at_offset(pack_offset)
|
| 913 |
+
|
| 914 |
+
@classmethod
def write_pack(cls, object_iter, pack_write, index_write=None,
               object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
    """
    Create a new pack by putting all objects obtained by the object_iterator
    into a pack which is written using the pack_write method.
    The respective index is produced as well if index_write is not None.

    :param object_iter: iterator yielding odb output objects
    :param pack_write: function to receive strings to write into the pack stream
    :param index_write: if not None, the function writes the index file corresponding
        to the pack.
    :param object_count: if you can provide the amount of objects in your iteration,
        this would be the place to put it. Otherwise we have to pre-iterate and store
        all items into a list to get the number, which uses more memory than necessary.
    :param zlib_compression: the zlib compression level to use
    :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
        and over all contents of the index. If index_write was None, index_binsha will be None

    **Note:** The destination of the write functions is up to the user. It could
    be a socket, or a file for instance

    **Note:** writes only undeltified objects"""
    objs = object_iter
    if not object_count:
        # without a known count we must materialize the iterator to len() it
        if not isinstance(object_iter, (tuple, list)):
            objs = list(object_iter)
        # END handle list type
        object_count = len(objs)
    # END handle object

    pack_writer = FlexibleSha1Writer(pack_write)
    pwrite = pack_writer.write
    ofs = 0  # current offset into the pack file
    index = None
    wants_index = index_write is not None

    # write header
    pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count))
    ofs += 12

    if wants_index:
        index = IndexWriter()
    # END handle index header

    actual_count = 0
    for obj in objs:
        actual_count += 1
        crc = 0

        # object header
        hdr = create_pack_object_header(obj.type_id, obj.size)
        if index_write:
            # the index stores a crc over the compressed on-disk entry,
            # seeded with the header bytes here
            crc = crc32(hdr)
        else:
            crc = None
        # END handle crc
        pwrite(hdr)

        # data stream: compress the object payload into the pack while
        # accumulating the entry crc
        zstream = zlib.compressobj(zlib_compression)
        ostream = obj.stream
        br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc=crc)
        assert(br == obj.size)
        if wants_index:
            index.append(obj.binsha, crc, ofs)
        # END handle index

        ofs += len(hdr) + bw
        if actual_count == object_count:
            break
        # END abort once we are done
    # END for each object

    if actual_count != object_count:
        raise ValueError(
            "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count))
    # END count assertion

    # write footer: the pack trailer is the sha1 over everything written so far
    pack_sha = pack_writer.sha(as_hex=False)
    assert len(pack_sha) == 20
    pack_write(pack_sha)
    ofs += len(pack_sha)  # just for completeness ;)

    index_sha = None
    if wants_index:
        index_sha = index.write(pack_sha, index_write)
    # END handle index

    return pack_sha, index_sha
|
| 1005 |
+
|
| 1006 |
+
@classmethod
def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
    """Create a new on-disk entity comprised of a properly named pack file and a properly named
    and corresponding index file. The pack contains all OStream objects contained in object iter.

    :param base_dir: directory which is to contain the files
    :return: PackEntity instance initialized with the new pack

    **Note:** for more information on the other parameters see the write_pack method"""
    pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
    index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
    pack_write = lambda d: os.write(pack_fd, d)
    index_write = lambda d: os.write(index_fd, d)

    try:
        pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write,
                                                   object_count, zlib_compression)
    finally:
        # close in finally: previously the descriptors leaked if write_pack
        # raised (e.g. on an object-count mismatch)
        os.close(pack_fd)
        os.close(index_fd)

    # rename the temp files to their content-addressed names
    # NOTE(review): bin_to_hex may return bytes on py3; '%s' would then embed
    # "b'...'" in the filename - verify against gitdb.util.bin_to_hex
    fmt = "pack-%s.%s"
    new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack'))
    new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx'))
    os.rename(pack_path, new_pack_path)
    os.rename(index_path, new_index_path)

    return cls(new_pack_path)
|
| 1030 |
+
|
| 1031 |
+
#} END interface
|
vllm/lib/python3.10/site-packages/gitdb/stream.py
ADDED
|
@@ -0,0 +1,730 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
|
| 6 |
+
from io import BytesIO
|
| 7 |
+
|
| 8 |
+
import mmap
|
| 9 |
+
import os
|
| 10 |
+
import sys
|
| 11 |
+
import zlib
|
| 12 |
+
|
| 13 |
+
from gitdb.fun import (
|
| 14 |
+
msb_size,
|
| 15 |
+
stream_copy,
|
| 16 |
+
apply_delta_data,
|
| 17 |
+
connect_deltas,
|
| 18 |
+
delta_types
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
from gitdb.util import (
|
| 22 |
+
allocate_memory,
|
| 23 |
+
LazyMixin,
|
| 24 |
+
make_sha,
|
| 25 |
+
write,
|
| 26 |
+
close,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
from gitdb.const import NULL_BYTE, BYTE_SPACE
|
| 30 |
+
from gitdb.utils.encoding import force_bytes
|
| 31 |
+
|
| 32 |
+
has_perf_mod = False
|
| 33 |
+
try:
|
| 34 |
+
from gitdb_speedups._perf import apply_delta as c_apply_delta
|
| 35 |
+
has_perf_mod = True
|
| 36 |
+
except ImportError:
|
| 37 |
+
pass
|
| 38 |
+
|
| 39 |
+
__all__ = ('DecompressMemMapReader', 'FDCompressedSha1Writer', 'DeltaApplyReader',
|
| 40 |
+
'Sha1Writer', 'FlexibleSha1Writer', 'ZippedStoreShaWriter', 'FDCompressedSha1Writer',
|
| 41 |
+
'FDStream', 'NullStream')
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
#{ RO Streams
|
| 45 |
+
|
| 46 |
+
class DecompressMemMapReader(LazyMixin):

    """Reads data in chunks from a memory map and decompresses it. The client sees
    only the uncompressed data, respective file-like read calls are handling on-demand
    buffered decompression accordingly

    A constraint on the total size of bytes is activated, simulating
    a logical file within a possibly larger physical memory area

    To read efficiently, you clearly don't want to read individual bytes, instead,
    read a few kilobytes at least.

    **Note:** The chunk-size should be carefully selected as it will involve quite a bit
    of string copying due to the way the zlib is implemented. Its very wasteful,
    hence we try to find a good tradeoff between allocation time and number of
    times we actually allocate. An own zlib implementation would be good here
    to better support streamed reading - it would only need to keep the mmap
    and decompress it into chunks, that's all ... """
    __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
                 '_cbr', '_phi')

    max_read_size = 512 * 1024  # currently unused

    def __init__(self, m, close_on_deletion, size=None):
        """Initialize with mmap for stream reading
        :param m: must be content data - use new if you have object data and no size"""
        self._m = m
        self._zip = zlib.decompressobj()
        self._buf = None  # buffer of decompressed bytes
        self._buflen = 0  # length of bytes in buffer
        if size is not None:
            self._s = size  # size of uncompressed data to read in total
        self._br = 0  # num uncompressed bytes read
        self._cws = 0  # start byte of compression window
        self._cwe = 0  # end byte of compression window
        self._cbr = 0  # number of compressed bytes read
        self._phi = False  # is True if we parsed the header info
        self._close = close_on_deletion  # close the memmap on deletion ?

    def _set_cache_(self, attr):
        # LazyMixin hook: triggered on first access of a missing slot
        assert attr == '_s'
        # only happens for size, which is a marker to indicate we still
        # have to parse the header from the stream
        self._parse_header_info()

    def __del__(self):
        self.close()

    def _parse_header_info(self):
        """If this stream contains object data, parse the header info and skip the
        stream to a point where each read will yield object content

        :return: parsed type_string, size"""
        # read header
        # should really be enough, cgit uses 8192 I believe
        # And for good reason !! This needs to be that high for the header to be read correctly in all cases
        maxb = 8192
        self._s = maxb
        hdr = self.read(maxb)
        hdrend = hdr.find(NULL_BYTE)
        typ, size = hdr[:hdrend].split(BYTE_SPACE)
        size = int(size)
        self._s = size

        # adjust internal state to match actual header length that we ignore
        # The buffer will be depleted first on future reads
        self._br = 0
        hdrend += 1
        self._buf = BytesIO(hdr[hdrend:])
        self._buflen = len(hdr) - hdrend

        self._phi = True

        return typ, size

    #{ Interface

    @classmethod
    def new(self, m, close_on_deletion=False):
        # NOTE: first parameter is conventionally named `cls` for classmethods;
        # kept as `self` to leave the code unchanged
        """Create a new DecompressMemMapReader instance for acting as a read-only stream
        This method parses the object header from m and returns the parsed
        type and size, as well as the created stream instance.

        :param m: memory map on which to operate. It must be object data ( header + contents )
        :param close_on_deletion: if True, the memory map will be closed once we are
            being deleted"""
        inst = DecompressMemMapReader(m, close_on_deletion, 0)
        typ, size = inst._parse_header_info()
        return typ, size, inst

    def data(self):
        """:return: random access compatible data we are working on"""
        return self._m

    def close(self):
        """Close our underlying stream of compressed bytes if this was allowed during initialization
        :return: True if we closed the underlying stream
        :note: can be called safely
        """
        if self._close:
            if hasattr(self._m, 'close'):
                self._m.close()
            self._close = False
        # END handle resource freeing

    def compressed_bytes_read(self):
        """
        :return: number of compressed bytes read. This includes the bytes it
            took to decompress the header ( if there was one )"""
        # ABSTRACT: When decompressing a byte stream, it can be that the first
        # x bytes which were requested match the first x bytes in the loosely
        # compressed datastream. This is the worst-case assumption that the reader
        # does, it assumes that it will get at least X bytes from X compressed bytes
        # in call cases.
        # The caveat is that the object, according to our known uncompressed size,
        # is already complete, but there are still some bytes left in the compressed
        # stream that contribute to the amount of compressed bytes.
        # How can we know that we are truly done, and have read all bytes we need
        # to read ?
        # Without help, we cannot know, as we need to obtain the status of the
        # decompression. If it is not finished, we need to decompress more data
        # until it is finished, to yield the actual number of compressed bytes
        # belonging to the decompressed object
        # We are using a custom zlib module for this, if its not present,
        # we try to put in additional bytes up for decompression if feasible
        # and check for the unused_data.

        # Only scrub the stream forward if we are officially done with the
        # bytes we were to have.
        if self._br == self._s and not self._zip.unused_data:
            # manipulate the bytes-read to allow our own read method to continue
            # but keep the window at its current position
            self._br = 0
            if hasattr(self._zip, 'status'):
                # custom zlib builds expose a `status` attribute
                while self._zip.status == zlib.Z_OK:
                    self.read(mmap.PAGESIZE)
                # END scrub-loop custom zlib
            else:
                # pass in additional pages, until we have unused data
                while not self._zip.unused_data and self._cbr != len(self._m):
                    self.read(mmap.PAGESIZE)
                # END scrub-loop default zlib
            # END handle stream scrubbing

            # reset bytes read, just to be sure
            self._br = self._s
        # END handle stream scrubbing

        # unused data ends up in the unconsumed tail, which was removed
        # from the count already
        return self._cbr

    #} END interface

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Allows to reset the stream to restart reading
        :raise ValueError: If offset and whence are not 0"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset

        self._zip = zlib.decompressobj()
        self._br = self._cws = self._cwe = self._cbr = 0
        if self._phi:
            self._phi = False
            del(self._s)  # trigger header parsing on first access
        # END skip header

    def read(self, size=-1):
        # clamp the request against the remaining logical bytes
        if size < 1:
            size = self._s - self._br
        else:
            size = min(size, self._s - self._br)
        # END clamp size

        if size == 0:
            return b''
        # END handle depletion

        # deplete the buffer, then just continue using the decompress object
        # which has an own buffer. We just need this to transparently parse the
        # header from the zlib stream
        dat = b''
        if self._buf:
            if self._buflen >= size:
                # have enough data
                dat = self._buf.read(size)
                self._buflen -= size
                self._br += size
                return dat
            else:
                dat = self._buf.read()  # ouch, duplicates data
                size -= self._buflen
                self._br += self._buflen

                self._buflen = 0
                self._buf = None
            # END handle buffer len
        # END handle buffer

        # decompress some data
        # Abstract: zlib needs to operate on chunks of our memory map ( which may
        # be large ), as it will otherwise and always fill in the 'unconsumed_tail'
        # attribute which possible reads our whole map to the end, forcing
        # everything to be read from disk even though just a portion was requested.
        # As this would be a nogo, we workaround it by passing only chunks of data,
        # moving the window into the memory map along as we decompress, which keeps
        # the tail smaller than our chunk-size. This causes 'only' the chunk to be
        # copied once, and another copy of a part of it when it creates the unconsumed
        # tail. We have to use it to hand in the appropriate amount of bytes during
        # the next read.
        tail = self._zip.unconsumed_tail
        if tail:
            # move the window, make it as large as size demands. For code-clarity,
            # we just take the chunk from our map again instead of reusing the unconsumed
            # tail. The latter one would safe some memory copying, but we could end up
            # with not getting enough data uncompressed, so we had to sort that out as well.
            # Now we just assume the worst case, hence the data is uncompressed and the window
            # needs to be as large as the uncompressed bytes we want to read.
            self._cws = self._cwe - len(tail)
            self._cwe = self._cws + size
        else:
            cws = self._cws
            self._cws = self._cwe
            self._cwe = cws + size
        # END handle tail

        # if window is too small, make it larger so zip can decompress something
        if self._cwe - self._cws < 8:
            self._cwe = self._cws + 8
        # END adjust winsize

        # takes a slice, but doesn't copy the data, it says ...
        indata = self._m[self._cws:self._cwe]

        # get the actual window end to be sure we don't use it for computations
        self._cwe = self._cws + len(indata)
        dcompdat = self._zip.decompress(indata, size)
        # update the amount of compressed bytes read
        # We feed possibly overlapping chunks, which is why the unconsumed tail
        # has to be taken into consideration, as well as the unused data
        # if we hit the end of the stream
        # NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
        # They are thorough, and I assume it is truly working.
        # Why is this logic as convoluted as it is ? Please look at the table in
        # https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
        # Basically, on py2.6, you want to use branch 1, whereas on all other python version, the second branch
        # will be the one that works.
        # However, the zlib VERSIONs as well as the platform check is used to further match the entries in the
        # table in the github issue. This is it ... it was the only way I could make this work everywhere.
        # IT's CERTAINLY GOING TO BITE US IN THE FUTURE ... .
        if getattr(zlib, 'ZLIB_RUNTIME_VERSION', zlib.ZLIB_VERSION) in ('1.2.7', '1.2.5') and not sys.platform == 'darwin':
            unused_datalen = len(self._zip.unconsumed_tail)
        else:
            unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
        # # end handle very special case ...

        self._cbr += len(indata) - unused_datalen
        self._br += len(dcompdat)

        if dat:
            dcompdat = dat + dcompdat
        # END prepend our cached data

        # it can happen, depending on the compression, that we get less bytes
        # than ordered as it needs the final portion of the data as well.
        # Recursively resolve that.
        # Note: dcompdat can be empty even though we still appear to have bytes
        # to read, if we are called by compressed_bytes_read - it manipulates
        # us to empty the stream
        if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
            dcompdat += self.read(size - len(dcompdat))
        # END handle special case
        return dcompdat
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
class DeltaApplyReader(LazyMixin):

    """A reader which dynamically applies pack deltas to a base object, keeping the
    memory demands to a minimum.

    The size of the final object is only obtainable once all deltas have been
    applied, unless it is retrieved from a pack index.

    The uncompressed Delta has the following layout (MSB being a most significant
    bit encoded dynamic size):

    * MSB Source Size - the size of the base against which the delta was created
    * MSB Target Size - the size of the resulting data after the delta was applied
    * A list of one byte commands (cmd) which are followed by a specific protocol:

     * cmd & 0x80 - copy delta_data[offset:offset+size]

      * Followed by an encoded offset into the delta data
      * Followed by an encoded size of the chunk to copy

     *  cmd & 0x7f - insert

      * insert cmd bytes from the delta buffer into the output stream

     * cmd == 0 - invalid operation ( or error in delta stream )
    """
    __slots__ = (
        "_bstream",                 # base stream to which to apply the deltas
        "_dstreams",                # tuple of delta stream readers
        "_mm_target",               # memory map of the delta-applied data
        "_size",                    # actual number of bytes in _mm_target
        "_br"                       # number of bytes read
    )

    #{ Configuration
    k_max_memory_move = 250 * 1000 * 1000
    #} END configuration

    def __init__(self, stream_list):
        """Initialize this instance with a list of streams, the first stream being
        the delta to apply on top of all following deltas, the last stream being the
        base object onto which to apply the deltas"""
        assert len(stream_list) > 1, "Need at least one delta and one base stream"

        self._bstream = stream_list[-1]
        self._dstreams = tuple(stream_list[:-1])
        # number of bytes handed out by read() so far
        self._br = 0

    def _set_cache_too_slow_without_c(self, attr):
        """Lazy initializer that merges all deltas into one before applying.

        Used when the C performance module is available (see the conditional
        `_set_cache_` assignment below); falls back to the brute-force path for
        a single delta."""
        # the direct algorithm is fastest and most direct if there is only one
        # delta. Also, the extra overhead might not be worth it for items smaller
        # than X - definitely the case in python, every function call costs
        # huge amounts of time
        # if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
        if len(self._dstreams) == 1:
            return self._set_cache_brute_(attr)

        # Aggregate all deltas into one delta in reverse order. Hence we take
        # the last delta, and reverse-merge its ancestor delta, until we receive
        # the final delta data stream.
        dcl = connect_deltas(self._dstreams)

        # call len directly, as the (optional) c version doesn't implement the sequence
        # protocol
        if dcl.rbound() == 0:
            self._size = 0
            self._mm_target = allocate_memory(0)
            return
        # END handle empty list

        self._size = dcl.rbound()
        self._mm_target = allocate_memory(self._size)

        # fully materialize the base object so the delta can copy from it randomly
        bbuf = allocate_memory(self._bstream.size)
        stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)

        # APPLY CHUNKS
        write = self._mm_target.write
        dcl.apply(bbuf, write)

        self._mm_target.seek(0)

    def _set_cache_brute_(self, attr):
        """If we are here, we apply the actual deltas"""
        # TODO: There should be a special case if there is only one stream
        # Then the default-git algorithm should perform a tad faster, as the
        # delta is not peaked into, causing less overhead.
        # collect (remaining_header_data, header_offset, src_size, target_size) per delta
        buffer_info_list = list()
        max_target_size = 0
        for dstream in self._dstreams:
            buf = dstream.read(512)            # read the header information + X
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)
            buffer_info_list.append((buf[offset:], offset, src_size, target_size))
            max_target_size = max(max_target_size, target_size)
        # END for each delta stream

        # sanity check - the first delta to apply should have the same source
        # size as our actual base stream
        base_size = self._bstream.size
        target_size = max_target_size

        # if we have more than 1 delta to apply, we will swap buffers, hence we must
        # assure that all buffers we use are large enough to hold all the results
        if len(self._dstreams) > 1:
            base_size = target_size = max(base_size, max_target_size)
        # END adjust buffer sizes

        # Allocate private memory map big enough to hold the first base buffer
        # We need random access to it
        bbuf = allocate_memory(base_size)
        stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)

        # allocate memory map large enough for the largest (intermediate) target
        # We will use it as scratch space for all delta ops. If the final
        # target buffer is smaller than our allocated space, we just use parts
        # of it upon return.
        tbuf = allocate_memory(target_size)

        # for each delta to apply, memory map the decompressed delta and
        # work on the op-codes to reconstruct everything.
        # For the actual copying, we use a seek and write pattern of buffer
        # slices.
        final_target_size = None
        for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
            # allocate a buffer to hold all delta data - fill in the data for
            # fast access. We do this as we know that reading individual bytes
            # from our stream would be slower than necessary ( although possible )
            # The dbuf buffer contains commands after the first two MSB sizes, the
            # offset specifies the amount of bytes read to get the sizes.
            ddata = allocate_memory(dstream.size - offset)
            ddata.write(dbuf)
            # read the rest from the stream. The size we give is larger than necessary
            stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)

            #######################################################################
            if 'c_apply_delta' in globals():
                c_apply_delta(bbuf, ddata, tbuf)
            else:
                apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
            #######################################################################

            # finally, swap out source and target buffers. The target is now the
            # base for the next delta to apply
            bbuf, tbuf = tbuf, bbuf
            bbuf.seek(0)
            tbuf.seek(0)
            final_target_size = target_size
        # END for each delta to apply

        # its already seeked to 0, constrain it to the actual size
        # NOTE: in the end of the loop, it swaps buffers, hence our target buffer
        # is not tbuf, but bbuf !
        self._mm_target = bbuf
        self._size = final_target_size

    #{ Configuration
    # Select the lazy initializer: the merged-delta path is only fast with the
    # C performance module; without it the brute-force path is used.
    if not has_perf_mod:
        _set_cache_ = _set_cache_brute_
    else:
        _set_cache_ = _set_cache_too_slow_without_c

    #} END configuration

    def read(self, count=0):
        """Read up to `count` bytes of the delta-applied data.

        :param count: number of bytes to read; values < 1 (or larger than what
            remains) read everything that is left
        :return: bytes read from the resolved target buffer"""
        bl = self._size - self._br      # bytes left
        if count < 1 or count > bl:
            count = bl
        # NOTE: we could check for certain size limits, and possibly
        # return buffers instead of strings to prevent byte copying
        data = self._mm_target.read(count)
        self._br += len(data)
        return data

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Allows to reset the stream to restart reading

        :raise ValueError: If offset and whence are not 0"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self._br = 0
        self._mm_target.seek(0)

    #{ Interface

    @classmethod
    def new(cls, stream_list):
        """
        Convert the given list of streams into a stream which resolves deltas
        when reading from it.

        :param stream_list: two or more stream objects, first stream is a Delta
            to the object that you want to resolve, followed by N additional delta
            streams. The list's last stream must be a non-delta stream.

        :return: Non-Delta OPackStream object whose stream can be used to obtain
            the decompressed resolved data
        :raise ValueError: if the stream list cannot be handled"""
        if len(stream_list) < 2:
            raise ValueError("Need at least two streams")
        # END single object special handling

        if stream_list[-1].type_id in delta_types:
            raise ValueError(
                "Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type)
        # END check stream
        return cls(stream_list)

    #} END interface

    #{ OInfo like Interface

    @property
    def type(self):
        # type of the resolved object equals the base object's type
        return self._bstream.type

    @property
    def type_id(self):
        return self._bstream.type_id

    @property
    def size(self):
        """:return: number of uncompressed bytes in the stream"""
        return self._size

    #} END oinfo like interface
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
#} END RO streams
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
#{ W Streams
|
| 555 |
+
|
| 556 |
+
class Sha1Writer:

    """Write-only stream which feeds every chunk it receives into a SHA1
    digest, so the hash of everything written so far can be queried at
    any point."""
    __slots__ = "sha1"

    def __init__(self):
        # make_sha is provided by the enclosing module
        self.sha1 = make_sha()

    #{ Stream Interface

    def write(self, data):
        """Digest the given chunk.

        :raise IOError: If not all bytes could be written
        :param data: byte object
        :return: length of incoming data"""
        self.sha1.update(data)
        return len(data)

    # END stream interface

    #{ Interface

    def sha(self, as_hex=False):
        """:return: sha so far
        :param as_hex: if True, sha will be hex-encoded, binary otherwise"""
        return self.sha1.hexdigest() if as_hex else self.sha1.digest()

    #} END interface
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class FlexibleSha1Writer(Sha1Writer):

    """Writer producing a sha1 while passing on the written bytes to the given
    write function.

    `writer` may be any callable accepting one bytes chunk, e.g. another
    stream's ``write`` method."""
    __slots__ = 'writer'

    def __init__(self, writer):
        Sha1Writer.__init__(self)
        self.writer = writer

    def write(self, data):
        """Digest *data* and forward it to the wrapped write callable.

        :return: length of the incoming data
            (FIX: previously the count from Sha1Writer.write was silently
            dropped and None was returned, breaking the stream write contract
            established by the base class.)"""
        num_bytes = Sha1Writer.write(self, data)
        self.writer(data)
        return num_bytes
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
class ZippedStoreShaWriter(Sha1Writer):

    """Keeps a zlib-compressed copy of everything written to it in an internal
    buffer while producing a sha over the uncompressed input."""
    __slots__ = ('buf', 'zip')

    def __init__(self):
        Sha1Writer.__init__(self)
        self.buf = BytesIO()
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    def __getattr__(self, attr):
        # anything we don't implement ourselves is delegated to the buffer
        return getattr(self.buf, attr)

    def write(self, data):
        """Digest *data* and append its compressed form to the buffer.

        :return: length of the incoming (uncompressed) data"""
        written = Sha1Writer.write(self, data)
        self.buf.write(self.zip.compress(data))
        return written

    def close(self):
        # flush the compressor so the buffer holds a complete zlib stream
        self.buf.write(self.zip.flush())

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Seeking currently only supports to rewind written data
        Multiple writes are not supported"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self.buf.seek(0)

    def getvalue(self):
        """:return: string value from the current stream position to the end"""
        return self.buf.getvalue()
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
class FDCompressedSha1Writer(Sha1Writer):

    """Digests data written to it, making the sha available, then compress the
    data and write it to the file descriptor

    **Note:** operates on raw file descriptors
    **Note:** for this to work, you have to use the close-method of this instance"""
    # NOTE(review): "sha1" re-declares the slot already defined on Sha1Writer
    __slots__ = ("fd", "sha1", "zip")

    # default exception
    exc = IOError("Failed to write all bytes to filedescriptor")

    def __init__(self, fd):
        super().__init__()
        self.fd = fd
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    #{ Stream Interface

    def write(self, data):
        """:raise IOError: If not all bytes could be written
        :return: length of incoming data"""
        self.sha1.update(data)
        cdata = self.zip.compress(data)
        # `write` here is a module-level helper (presumably os.write imported
        # elsewhere in this module) — it returns the number of bytes written
        bytes_written = write(self.fd, cdata)

        if bytes_written != len(cdata):
            raise self.exc

        return len(data)

    def close(self):
        """Flush the remaining compressed data to the descriptor and close it.

        :raise IOError: if the final flush could not be written completely"""
        remainder = self.zip.flush()
        if write(self.fd, remainder) != len(remainder):
            raise self.exc
        # `close` is the module-level descriptor close helper
        return close(self.fd)

    #} END stream interface
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
class FDStream:

    """A simple wrapper providing the most basic functions on a file descriptor
    with the fileobject interface. Cannot use os.fdopen as the resulting stream
    takes ownership"""
    __slots__ = ("_fd", '_pos')

    def __init__(self, fd):
        self._fd = fd
        self._pos = 0

    def write(self, data):
        """Write *data* to the descriptor, tracking the logical position."""
        self._pos += len(data)
        os.write(self._fd, data)

    def read(self, count=0):
        """Read up to *count* bytes from the descriptor.

        :param count: amount of bytes to read; 0 requests the full file size
        :return: bytes read (may be shorter at EOF)"""
        if count == 0:
            # BUGFIX: previously referenced the non-existent attribute
            # `self._filepath` (not in __slots__, never assigned), raising
            # AttributeError. Derive the size from the descriptor instead.
            count = os.fstat(self._fd).st_size
        # END handle read everything

        data = os.read(self._fd, count)
        self._pos += len(data)
        return data

    def fileno(self):
        return self._fd

    def tell(self):
        # logical position advanced by our own reads/writes
        return self._pos

    def close(self):
        # `close` is the module-level descriptor close helper
        close(self._fd)
|
| 712 |
+
|
| 713 |
+
|
| 714 |
+
class NullStream:

    """Stream interface that swallows writes and yields nothing on reads,
    comparable to /dev/null."""
    __slots__ = tuple()

    def write(self, data):
        # pretend everything was consumed
        return len(data)

    def read(self, size=0):
        return ''

    def close(self):
        pass
|
| 728 |
+
|
| 729 |
+
|
| 730 |
+
#} END W streams
|
vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc
ADDED
|
Binary file (5.62 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc
ADDED
|
Binary file (1.96 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc
ADDED
|
Binary file (4.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc
ADDED
|
Binary file (2.15 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/gitdb/test/lib.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Utilities used in ODB testing"""
|
| 6 |
+
from gitdb import OStream
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import random
|
| 10 |
+
from array import array
|
| 11 |
+
|
| 12 |
+
from io import BytesIO
|
| 13 |
+
|
| 14 |
+
import glob
|
| 15 |
+
import unittest
|
| 16 |
+
import tempfile
|
| 17 |
+
import shutil
|
| 18 |
+
import os
|
| 19 |
+
import gc
|
| 20 |
+
import logging
|
| 21 |
+
from functools import wraps
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
#{ Bases
|
| 25 |
+
|
| 26 |
+
class TestBase(unittest.TestCase):
    """Base class for all tests

    TestCase providing access to readonly repositories using the following member variables.

    * gitrepopath

     * read-only base path of the git source repository, i.e. .../git/.git
    """

    #{ Invariants
    # Environment variable that may point to an alternative .git repository to test against.
    k_env_git_repo = "GITDB_TEST_GIT_REPO_BASE"
    #} END invariants

    @classmethod
    def setUpClass(cls):
        """Resolve the repository path to test against, preferring the
        environment variable and defaulting to the gitdb checkout itself."""
        try:
            super().setUpClass()
        except AttributeError:
            # older unittest without setUpClass on the base
            pass

        cls.gitrepopath = os.environ.get(cls.k_env_git_repo)
        if not cls.gitrepopath:
            logging.info(
                "You can set the %s environment variable to a .git repository of your choice - defaulting to the gitdb repository", cls.k_env_git_repo)
            ospd = os.path.dirname
            # three levels up from this file: the .git directory of the checkout
            cls.gitrepopath = os.path.join(ospd(ospd(ospd(__file__))), '.git')
        # end assure gitrepo is set
        assert cls.gitrepopath.endswith('.git')
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
#} END bases
|
| 58 |
+
|
| 59 |
+
#{ Decorators
|
| 60 |
+
|
| 61 |
+
def with_rw_directory(func):
    """Decorator: create a temporary directory which can be written to, remove
    it if the test succeeds, but leave it otherwise to aid additional debugging.

    :param func: test method taking ``(self, path)``
    :return: wrapped method taking only ``(self)``"""

    # FIX: use functools.wraps (already imported at module level) instead of
    # hand-copying only __name__, so __doc__/__module__/__wrapped__ survive too.
    @wraps(func)
    def wrapper(self):
        path = tempfile.mktemp(prefix=func.__name__)
        os.mkdir(path)
        keep = False
        try:
            try:
                return func(self, path)
            except Exception:
                sys.stderr.write(f"Test {type(self).__name__}.{func.__name__} failed, output is at {path!r}\n")
                keep = True
                raise
        finally:
            # Need to collect here to be sure all handles have been closed. It appears
            # a windows-only issue. In fact things should be deleted, as well as
            # memory maps closed, once objects go out of scope. For some reason
            # though this is not the case here unless we collect explicitly.
            if not keep:
                gc.collect()
                shutil.rmtree(path)
        # END handle exception
    # END wrapper

    return wrapper
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def with_packs_rw(func):
    """Decorator providing a path into which the packs for testing should be
    copied. Will pass on the path to the actual function afterwards.

    :param func: test method taking ``(self, path)``"""

    # FIX: use functools.wraps (already imported at module level) instead of
    # hand-copying only __name__, so __doc__/__module__/__wrapped__ survive too.
    @wraps(func)
    def wrapper(self, path):
        src_pack_glob = fixture_path('packs/*')
        copy_files_globbed(src_pack_glob, path, hard_link_ok=True)
        return func(self, path)
    # END wrapper

    return wrapper
|
| 103 |
+
|
| 104 |
+
#} END decorators
|
| 105 |
+
|
| 106 |
+
#{ Routines
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def fixture_path(relapath=''):
    """:return: absolute path into the fixture directory
    :param relapath: relative path into the fixtures directory, or ''
        to obtain the fixture directory itself"""
    fixtures_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
    return os.path.join(fixtures_dir, relapath)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def copy_files_globbed(source_glob, target_dir, hard_link_ok=False):
    """Copy all files found according to the given source glob into the target directory
    :param hard_link_ok: if True, hard links will be created if possible. Otherwise
        the files will be copied"""
    can_link = hard_link_ok and hasattr(os, 'link')
    for src_file in glob.glob(source_glob):
        if can_link:
            target = os.path.join(target_dir, os.path.basename(src_file))
            try:
                os.link(src_file, target)
                continue
            except OSError:
                # e.g. cross-device link failure - fall back to a plain copy
                pass
            # END handle cross device links ( and resulting failure )
        # END try hard link
        shutil.copy(src_file, target_dir)
    # END for each file to copy
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def make_bytes(size_in_bytes, randomize=False):
    """:return: bytes of (roughly) the given size, built from a run of integers
    :param randomize: try to produce a very random stream"""
    num_ints = size_in_bytes // 4
    values = list(range(num_ints))
    if randomize:
        random.shuffle(values)
    # END randomize
    return array('i', values).tobytes()
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def make_object(type, data):
    """:return: bytes resembling an uncompressed loose object
    :param type: object type string, e.g. ``"blob"``
    :param data: payload bytes"""
    # FIX: the header previously hard-coded "blob", silently ignoring `type`.
    # All visible callers pass "blob", so this is backward compatible while
    # making the helper usable for other object types.
    odata = "%s %i\0" % (type, len(data))
    return odata.encode("ascii") + data
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def make_memory_file(size_in_bytes, randomize=False):
    """:return: tuple(size_of_stream, stream)
    :param randomize: try to produce a very random stream"""
    payload = make_bytes(size_in_bytes, randomize)
    return len(payload), BytesIO(payload)
|
| 158 |
+
|
| 159 |
+
#} END routines
|
| 160 |
+
|
| 161 |
+
#{ Stream Utilities
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
class DummyStream:

    """Stream stand-in that records how it was used instead of performing
    any real I/O."""

    def __init__(self):
        self.was_read = False       # becomes True once read() is called
        self.bytes = 0              # size passed to the most recent read()
        self.closed = False         # becomes True once close() is called

    def read(self, size):
        self.was_read = True
        self.bytes = size

    def close(self):
        self.closed = True

    def _assert(self):
        # verify the stream was actually consumed
        assert self.was_read
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class DeriveTest(OStream):

    """Helper type verifying that OStream can be subclassed with additional
    positional and keyword arguments."""

    def __init__(self, sha, type, size, stream, *args, **kwargs):
        # NOTE: deliberately does not forward to OStream.__init__ — only the
        # extra args are consumed here; the base fields are expected to be
        # handled by the base type's construction (presumably in __new__ for a
        # tuple-based OStream — confirm against gitdb.base).
        self.myarg = kwargs.pop('myarg')
        self.args = args

    def _assert(self):
        # both extras must have been supplied for the derivation test to pass
        assert self.args
        assert self.myarg
|
| 191 |
+
|
| 192 |
+
#} END stream utilitiess
|
vllm/lib/python3.10/site-packages/gitdb/test/test_base.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Test for object db"""
|
| 6 |
+
from gitdb.test.lib import (
|
| 7 |
+
TestBase,
|
| 8 |
+
DummyStream,
|
| 9 |
+
DeriveTest,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
from gitdb import (
|
| 13 |
+
OInfo,
|
| 14 |
+
OPackInfo,
|
| 15 |
+
ODeltaPackInfo,
|
| 16 |
+
OStream,
|
| 17 |
+
OPackStream,
|
| 18 |
+
ODeltaPackStream,
|
| 19 |
+
IStream
|
| 20 |
+
)
|
| 21 |
+
from gitdb.util import (
|
| 22 |
+
NULL_BIN_SHA
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
from gitdb.typ import (
|
| 26 |
+
str_blob_type
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class TestBaseTypes(TestBase):
    """Exercises the info/stream tuple types exported by gitdb."""

    def test_streams(self):
        """Walk through OInfo/OPackInfo/ODeltaPackInfo and their stream
        counterparts plus IStream, checking field access and mutability."""
        # test info
        sha = NULL_BIN_SHA
        s = 20
        blob_id = 3     # numeric type id corresponding to str_blob_type

        info = OInfo(sha, str_blob_type, s)
        assert info.binsha == sha
        assert info.type == str_blob_type
        assert info.type_id == blob_id
        assert info.size == s

        # test pack info
        # provides type_id
        pinfo = OPackInfo(0, blob_id, s)
        assert pinfo.type == str_blob_type
        assert pinfo.type_id == blob_id
        assert pinfo.pack_offset == 0

        dpinfo = ODeltaPackInfo(0, blob_id, s, sha)
        assert dpinfo.type == str_blob_type
        assert dpinfo.type_id == blob_id
        assert dpinfo.delta_info == sha
        assert dpinfo.pack_offset == 0

        # test ostream - info tuple extended with a stream object
        stream = DummyStream()
        ostream = OStream(*(info + (stream, )))
        assert ostream.stream is stream
        ostream.read(15)
        stream._assert()
        assert stream.bytes == 15
        ostream.read(20)
        assert stream.bytes == 20

        # test packstream
        postream = OPackStream(*(pinfo + (stream, )))
        assert postream.stream is stream
        postream.read(10)
        stream._assert()
        assert stream.bytes == 10

        # test deltapackstream
        dpostream = ODeltaPackStream(*(dpinfo + (stream, )))
        assert dpostream.stream is stream
        dpostream.read(5)
        stream._assert()
        assert stream.bytes == 5

        # derive with own args
        DeriveTest(sha, str_blob_type, s, stream, 'mine', myarg=3)._assert()

        # test istream - mutable input stream whose sha is set after storage
        istream = IStream(str_blob_type, s, stream)
        assert istream.binsha == None
        istream.binsha = sha
        assert istream.binsha == sha

        assert len(istream.binsha) == 20
        assert len(istream.hexsha) == 40

        # size, type and stream are all writable on IStream
        assert istream.size == s
        istream.size = s * 2
        assert istream.size == s * 2
        assert istream.type == str_blob_type
        istream.type = "something"
        assert istream.type == "something"
        assert istream.stream is stream
        istream.stream = None
        assert istream.stream is None

        # error attribute may carry an exception raised during processing
        assert istream.error is None
        istream.error = Exception()
        assert isinstance(istream.error, Exception)
|
vllm/lib/python3.10/site-packages/gitdb/test/test_example.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Module with examples from the tutorial section of the docs"""
|
| 6 |
+
import os
|
| 7 |
+
from gitdb.test.lib import TestBase
|
| 8 |
+
from gitdb import IStream
|
| 9 |
+
from gitdb.db import LooseObjectDB
|
| 10 |
+
|
| 11 |
+
from io import BytesIO
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestExamples(TestBase):
    """Runs the examples from the tutorial section of the docs against a
    real loose-object database."""

    def test_base(self):
        """Iterate all loose objects of the test repository, then store a new
        blob and verify it received a sha."""
        ldb = LooseObjectDB(os.path.join(self.gitrepopath, 'objects'))

        for sha1 in ldb.sha_iter():
            oinfo = ldb.info(sha1)
            ostream = ldb.stream(sha1)
            # info and stream share the first three fields (sha, type, size)
            assert oinfo[:3] == ostream[:3]

            assert len(ostream.read()) == ostream.size
            assert ldb.has_object(oinfo.binsha)
        # END for each sha in database
        # assure we close all files
        try:
            del(ostream)
            del(oinfo)
        except UnboundLocalError:
            pass
        # END ignore exception if there are no loose objects

        data = b"my data"
        istream = IStream("blob", len(data), BytesIO(data))

        # the object does not yet have a sha
        assert istream.binsha is None
        ldb.store(istream)
        # now the sha is set
        assert len(istream.binsha) == 20
        assert ldb.has_object(istream.binsha)
|
vllm/lib/python3.10/site-packages/gitdb/test/test_stream.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Test for object db"""
|
| 6 |
+
|
| 7 |
+
from gitdb.test.lib import (
|
| 8 |
+
TestBase,
|
| 9 |
+
DummyStream,
|
| 10 |
+
make_bytes,
|
| 11 |
+
make_object,
|
| 12 |
+
fixture_path
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from gitdb import (
|
| 16 |
+
DecompressMemMapReader,
|
| 17 |
+
FDCompressedSha1Writer,
|
| 18 |
+
LooseObjectDB,
|
| 19 |
+
Sha1Writer,
|
| 20 |
+
MemoryDB,
|
| 21 |
+
IStream,
|
| 22 |
+
)
|
| 23 |
+
from gitdb.util import hex_to_bin
|
| 24 |
+
|
| 25 |
+
import zlib
|
| 26 |
+
from gitdb.typ import (
|
| 27 |
+
str_blob_type
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
import tempfile
|
| 31 |
+
import os
|
| 32 |
+
from io import BytesIO
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class TestStream(TestBase):
|
| 36 |
+
|
| 37 |
+
"""Test stream classes"""
|
| 38 |
+
|
| 39 |
+
data_sizes = (15, 10000, 1000 * 1024 + 512)
|
| 40 |
+
|
| 41 |
+
def _assert_stream_reader(self, stream, cdata, rewind_stream=lambda s: None):
|
| 42 |
+
"""Make stream tests - the orig_stream is seekable, allowing it to be
|
| 43 |
+
rewound and reused
|
| 44 |
+
:param cdata: the data we expect to read from stream, the contents
|
| 45 |
+
:param rewind_stream: function called to rewind the stream to make it ready
|
| 46 |
+
for reuse"""
|
| 47 |
+
ns = 10
|
| 48 |
+
assert len(cdata) > ns - 1, "Data must be larger than %i, was %i" % (ns, len(cdata))
|
| 49 |
+
|
| 50 |
+
# read in small steps
|
| 51 |
+
ss = len(cdata) // ns
|
| 52 |
+
for i in range(ns):
|
| 53 |
+
data = stream.read(ss)
|
| 54 |
+
chunk = cdata[i * ss:(i + 1) * ss]
|
| 55 |
+
assert data == chunk
|
| 56 |
+
# END for each step
|
| 57 |
+
rest = stream.read()
|
| 58 |
+
if rest:
|
| 59 |
+
assert rest == cdata[-len(rest):]
|
| 60 |
+
# END handle rest
|
| 61 |
+
|
| 62 |
+
if isinstance(stream, DecompressMemMapReader):
|
| 63 |
+
assert len(stream.data()) == stream.compressed_bytes_read()
|
| 64 |
+
# END handle special type
|
| 65 |
+
|
| 66 |
+
rewind_stream(stream)
|
| 67 |
+
|
| 68 |
+
# read everything
|
| 69 |
+
rdata = stream.read()
|
| 70 |
+
assert rdata == cdata
|
| 71 |
+
|
| 72 |
+
if isinstance(stream, DecompressMemMapReader):
|
| 73 |
+
assert len(stream.data()) == stream.compressed_bytes_read()
|
| 74 |
+
# END handle special type
|
| 75 |
+
|
| 76 |
+
def test_decompress_reader(self):
|
| 77 |
+
for close_on_deletion in range(2):
|
| 78 |
+
for with_size in range(2):
|
| 79 |
+
for ds in self.data_sizes:
|
| 80 |
+
cdata = make_bytes(ds, randomize=False)
|
| 81 |
+
|
| 82 |
+
# zdata = zipped actual data
|
| 83 |
+
# cdata = original content data
|
| 84 |
+
|
| 85 |
+
# create reader
|
| 86 |
+
if with_size:
|
| 87 |
+
# need object data
|
| 88 |
+
zdata = zlib.compress(make_object(str_blob_type, cdata))
|
| 89 |
+
typ, size, reader = DecompressMemMapReader.new(zdata, close_on_deletion)
|
| 90 |
+
assert size == len(cdata)
|
| 91 |
+
assert typ == str_blob_type
|
| 92 |
+
|
| 93 |
+
# even if we don't set the size, it will be set automatically on first read
|
| 94 |
+
test_reader = DecompressMemMapReader(zdata, close_on_deletion=False)
|
| 95 |
+
assert test_reader._s == len(cdata)
|
| 96 |
+
else:
|
| 97 |
+
# here we need content data
|
| 98 |
+
zdata = zlib.compress(cdata)
|
| 99 |
+
reader = DecompressMemMapReader(zdata, close_on_deletion, len(cdata))
|
| 100 |
+
assert reader._s == len(cdata)
|
| 101 |
+
# END get reader
|
| 102 |
+
|
| 103 |
+
self._assert_stream_reader(reader, cdata, lambda r: r.seek(0))
|
| 104 |
+
|
| 105 |
+
# put in a dummy stream for closing
|
| 106 |
+
dummy = DummyStream()
|
| 107 |
+
reader._m = dummy
|
| 108 |
+
|
| 109 |
+
assert not dummy.closed
|
| 110 |
+
del(reader)
|
| 111 |
+
assert dummy.closed == close_on_deletion
|
| 112 |
+
# END for each datasize
|
| 113 |
+
# END whether size should be used
|
| 114 |
+
# END whether stream should be closed when deleted
|
| 115 |
+
|
| 116 |
+
def test_sha_writer(self):
|
| 117 |
+
writer = Sha1Writer()
|
| 118 |
+
assert 2 == writer.write(b"hi")
|
| 119 |
+
assert len(writer.sha(as_hex=1)) == 40
|
| 120 |
+
assert len(writer.sha(as_hex=0)) == 20
|
| 121 |
+
|
| 122 |
+
# make sure it does something ;)
|
| 123 |
+
prev_sha = writer.sha()
|
| 124 |
+
writer.write(b"hi again")
|
| 125 |
+
assert writer.sha() != prev_sha
|
| 126 |
+
|
| 127 |
+
def test_compressed_writer(self):
|
| 128 |
+
for ds in self.data_sizes:
|
| 129 |
+
fd, path = tempfile.mkstemp()
|
| 130 |
+
ostream = FDCompressedSha1Writer(fd)
|
| 131 |
+
data = make_bytes(ds, randomize=False)
|
| 132 |
+
|
| 133 |
+
# for now, just a single write, code doesn't care about chunking
|
| 134 |
+
assert len(data) == ostream.write(data)
|
| 135 |
+
ostream.close()
|
| 136 |
+
|
| 137 |
+
# its closed already
|
| 138 |
+
self.assertRaises(OSError, os.close, fd)
|
| 139 |
+
|
| 140 |
+
# read everything back, compare to data we zip
|
| 141 |
+
fd = os.open(path, os.O_RDONLY | getattr(os, 'O_BINARY', 0))
|
| 142 |
+
written_data = os.read(fd, os.path.getsize(path))
|
| 143 |
+
assert len(written_data) == os.path.getsize(path)
|
| 144 |
+
os.close(fd)
|
| 145 |
+
assert written_data == zlib.compress(data, 1) # best speed
|
| 146 |
+
|
| 147 |
+
os.remove(path)
|
| 148 |
+
# END for each os
|
| 149 |
+
|
| 150 |
+
def test_decompress_reader_special_case(self):
|
| 151 |
+
odb = LooseObjectDB(fixture_path('objects'))
|
| 152 |
+
mdb = MemoryDB()
|
| 153 |
+
for sha in (b'888401851f15db0eed60eb1bc29dec5ddcace911',
|
| 154 |
+
b'7bb839852ed5e3a069966281bb08d50012fb309b',):
|
| 155 |
+
ostream = odb.stream(hex_to_bin(sha))
|
| 156 |
+
|
| 157 |
+
# if there is a bug, we will be missing one byte exactly !
|
| 158 |
+
data = ostream.read()
|
| 159 |
+
assert len(data) == ostream.size
|
| 160 |
+
|
| 161 |
+
# Putting it back in should yield nothing new - after all, we have
|
| 162 |
+
dump = mdb.store(IStream(ostream.type, ostream.size, BytesIO(data)))
|
| 163 |
+
assert dump.hexsha == sha
|
| 164 |
+
# end for each loose object sha to test
|
vllm/lib/python3.10/site-packages/gitdb/test/test_util.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Test for object db"""
|
| 6 |
+
import tempfile
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
from gitdb.test.lib import TestBase
|
| 10 |
+
from gitdb.util import (
|
| 11 |
+
to_hex_sha,
|
| 12 |
+
to_bin_sha,
|
| 13 |
+
NULL_HEX_SHA,
|
| 14 |
+
LockedFD
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TestUtils(TestBase):
|
| 19 |
+
|
| 20 |
+
def test_basics(self):
|
| 21 |
+
assert to_hex_sha(NULL_HEX_SHA) == NULL_HEX_SHA
|
| 22 |
+
assert len(to_bin_sha(NULL_HEX_SHA)) == 20
|
| 23 |
+
assert to_hex_sha(to_bin_sha(NULL_HEX_SHA)) == NULL_HEX_SHA.encode("ascii")
|
| 24 |
+
|
| 25 |
+
def _cmp_contents(self, file_path, data):
|
| 26 |
+
# raise if data from file at file_path
|
| 27 |
+
# does not match data string
|
| 28 |
+
with open(file_path, "rb") as fp:
|
| 29 |
+
assert fp.read() == data.encode("ascii")
|
| 30 |
+
|
| 31 |
+
def test_lockedfd(self):
|
| 32 |
+
my_file = tempfile.mktemp()
|
| 33 |
+
orig_data = "hello"
|
| 34 |
+
new_data = "world"
|
| 35 |
+
with open(my_file, "wb") as my_file_fp:
|
| 36 |
+
my_file_fp.write(orig_data.encode("ascii"))
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
lfd = LockedFD(my_file)
|
| 40 |
+
lockfilepath = lfd._lockfilepath()
|
| 41 |
+
|
| 42 |
+
# cannot end before it was started
|
| 43 |
+
self.assertRaises(AssertionError, lfd.rollback)
|
| 44 |
+
self.assertRaises(AssertionError, lfd.commit)
|
| 45 |
+
|
| 46 |
+
# open for writing
|
| 47 |
+
assert not os.path.isfile(lockfilepath)
|
| 48 |
+
wfd = lfd.open(write=True)
|
| 49 |
+
assert lfd._fd is wfd
|
| 50 |
+
assert os.path.isfile(lockfilepath)
|
| 51 |
+
|
| 52 |
+
# write data and fail
|
| 53 |
+
os.write(wfd, new_data.encode("ascii"))
|
| 54 |
+
lfd.rollback()
|
| 55 |
+
assert lfd._fd is None
|
| 56 |
+
self._cmp_contents(my_file, orig_data)
|
| 57 |
+
assert not os.path.isfile(lockfilepath)
|
| 58 |
+
|
| 59 |
+
# additional call doesn't fail
|
| 60 |
+
lfd.commit()
|
| 61 |
+
lfd.rollback()
|
| 62 |
+
|
| 63 |
+
# test reading
|
| 64 |
+
lfd = LockedFD(my_file)
|
| 65 |
+
rfd = lfd.open(write=False)
|
| 66 |
+
assert os.read(rfd, len(orig_data)) == orig_data.encode("ascii")
|
| 67 |
+
|
| 68 |
+
assert os.path.isfile(lockfilepath)
|
| 69 |
+
# deletion rolls back
|
| 70 |
+
del(lfd)
|
| 71 |
+
assert not os.path.isfile(lockfilepath)
|
| 72 |
+
|
| 73 |
+
# write data - concurrently
|
| 74 |
+
lfd = LockedFD(my_file)
|
| 75 |
+
olfd = LockedFD(my_file)
|
| 76 |
+
assert not os.path.isfile(lockfilepath)
|
| 77 |
+
wfdstream = lfd.open(write=True, stream=True) # this time as stream
|
| 78 |
+
assert os.path.isfile(lockfilepath)
|
| 79 |
+
# another one fails
|
| 80 |
+
self.assertRaises(IOError, olfd.open)
|
| 81 |
+
|
| 82 |
+
wfdstream.write(new_data.encode("ascii"))
|
| 83 |
+
lfd.commit()
|
| 84 |
+
assert not os.path.isfile(lockfilepath)
|
| 85 |
+
self._cmp_contents(my_file, new_data)
|
| 86 |
+
|
| 87 |
+
# could test automatic _end_writing on destruction
|
| 88 |
+
finally:
|
| 89 |
+
os.remove(my_file)
|
| 90 |
+
# END final cleanup
|
| 91 |
+
|
| 92 |
+
# try non-existing file for reading
|
| 93 |
+
lfd = LockedFD(tempfile.mktemp())
|
| 94 |
+
try:
|
| 95 |
+
lfd.open(write=False)
|
| 96 |
+
except OSError:
|
| 97 |
+
assert not os.path.exists(lfd._lockfilepath())
|
| 98 |
+
else:
|
| 99 |
+
self.fail("expected OSError")
|
| 100 |
+
# END handle exceptions
|
vllm/lib/python3.10/site-packages/gitdb/typ.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
"""Module containing information about types known to the database"""
|
| 6 |
+
|
| 7 |
+
str_blob_type = b'blob'
|
| 8 |
+
str_commit_type = b'commit'
|
| 9 |
+
str_tree_type = b'tree'
|
| 10 |
+
str_tag_type = b'tag'
|
vllm/lib/python3.10/site-packages/gitdb/util.py
ADDED
|
@@ -0,0 +1,398 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitDB and is released under
|
| 4 |
+
# the New BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
import binascii
|
| 6 |
+
import os
|
| 7 |
+
import mmap
|
| 8 |
+
import sys
|
| 9 |
+
import time
|
| 10 |
+
import errno
|
| 11 |
+
|
| 12 |
+
from io import BytesIO
|
| 13 |
+
|
| 14 |
+
from smmap import (
|
| 15 |
+
StaticWindowMapManager,
|
| 16 |
+
SlidingWindowMapManager,
|
| 17 |
+
SlidingWindowMapBuffer
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
# initialize our global memory manager instance
|
| 21 |
+
# Use it to free cached (and unused) resources.
|
| 22 |
+
mman = SlidingWindowMapManager()
|
| 23 |
+
# END handle mman
|
| 24 |
+
|
| 25 |
+
import hashlib
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
from struct import unpack_from
|
| 29 |
+
except ImportError:
|
| 30 |
+
from struct import unpack, calcsize
|
| 31 |
+
__calcsize_cache = dict()
|
| 32 |
+
|
| 33 |
+
def unpack_from(fmt, data, offset=0):
|
| 34 |
+
try:
|
| 35 |
+
size = __calcsize_cache[fmt]
|
| 36 |
+
except KeyError:
|
| 37 |
+
size = calcsize(fmt)
|
| 38 |
+
__calcsize_cache[fmt] = size
|
| 39 |
+
# END exception handling
|
| 40 |
+
return unpack(fmt, data[offset: offset + size])
|
| 41 |
+
# END own unpack_from implementation
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
#{ Aliases
|
| 45 |
+
|
| 46 |
+
hex_to_bin = binascii.a2b_hex
|
| 47 |
+
bin_to_hex = binascii.b2a_hex
|
| 48 |
+
|
| 49 |
+
# errors
|
| 50 |
+
ENOENT = errno.ENOENT
|
| 51 |
+
|
| 52 |
+
# os shortcuts
|
| 53 |
+
exists = os.path.exists
|
| 54 |
+
mkdir = os.mkdir
|
| 55 |
+
chmod = os.chmod
|
| 56 |
+
isdir = os.path.isdir
|
| 57 |
+
isfile = os.path.isfile
|
| 58 |
+
rename = os.rename
|
| 59 |
+
dirname = os.path.dirname
|
| 60 |
+
basename = os.path.basename
|
| 61 |
+
join = os.path.join
|
| 62 |
+
read = os.read
|
| 63 |
+
write = os.write
|
| 64 |
+
close = os.close
|
| 65 |
+
fsync = os.fsync
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _retry(func, *args, **kwargs):
|
| 69 |
+
# Wrapper around functions, that are problematic on "Windows". Sometimes
|
| 70 |
+
# the OS or someone else has still a handle to the file
|
| 71 |
+
if sys.platform == "win32":
|
| 72 |
+
for _ in range(10):
|
| 73 |
+
try:
|
| 74 |
+
return func(*args, **kwargs)
|
| 75 |
+
except Exception:
|
| 76 |
+
time.sleep(0.1)
|
| 77 |
+
return func(*args, **kwargs)
|
| 78 |
+
else:
|
| 79 |
+
return func(*args, **kwargs)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def remove(*args, **kwargs):
|
| 83 |
+
return _retry(os.remove, *args, **kwargs)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# Backwards compatibility imports
|
| 87 |
+
from gitdb.const import (
|
| 88 |
+
NULL_BIN_SHA,
|
| 89 |
+
NULL_HEX_SHA
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
#} END Aliases
|
| 93 |
+
|
| 94 |
+
#{ compatibility stuff ...
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class _RandomAccessBytesIO:
|
| 98 |
+
|
| 99 |
+
"""Wrapper to provide required functionality in case memory maps cannot or may
|
| 100 |
+
not be used. This is only really required in python 2.4"""
|
| 101 |
+
__slots__ = '_sio'
|
| 102 |
+
|
| 103 |
+
def __init__(self, buf=''):
|
| 104 |
+
self._sio = BytesIO(buf)
|
| 105 |
+
|
| 106 |
+
def __getattr__(self, attr):
|
| 107 |
+
return getattr(self._sio, attr)
|
| 108 |
+
|
| 109 |
+
def __len__(self):
|
| 110 |
+
return len(self.getvalue())
|
| 111 |
+
|
| 112 |
+
def __getitem__(self, i):
|
| 113 |
+
return self.getvalue()[i]
|
| 114 |
+
|
| 115 |
+
def __getslice__(self, start, end):
|
| 116 |
+
return self.getvalue()[start:end]
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def byte_ord(b):
|
| 120 |
+
"""
|
| 121 |
+
Return the integer representation of the byte string. This supports Python
|
| 122 |
+
3 byte arrays as well as standard strings.
|
| 123 |
+
"""
|
| 124 |
+
try:
|
| 125 |
+
return ord(b)
|
| 126 |
+
except TypeError:
|
| 127 |
+
return b
|
| 128 |
+
|
| 129 |
+
#} END compatibility stuff ...
|
| 130 |
+
|
| 131 |
+
#{ Routines
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def make_sha(source=b''):
|
| 135 |
+
"""A python2.4 workaround for the sha/hashlib module fiasco
|
| 136 |
+
|
| 137 |
+
**Note** From the dulwich project """
|
| 138 |
+
try:
|
| 139 |
+
return hashlib.sha1(source)
|
| 140 |
+
except NameError:
|
| 141 |
+
import sha
|
| 142 |
+
sha1 = sha.sha(source)
|
| 143 |
+
return sha1
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def allocate_memory(size):
|
| 147 |
+
""":return: a file-protocol accessible memory block of the given size"""
|
| 148 |
+
if size == 0:
|
| 149 |
+
return _RandomAccessBytesIO(b'')
|
| 150 |
+
# END handle empty chunks gracefully
|
| 151 |
+
|
| 152 |
+
try:
|
| 153 |
+
return mmap.mmap(-1, size) # read-write by default
|
| 154 |
+
except OSError:
|
| 155 |
+
# setup real memory instead
|
| 156 |
+
# this of course may fail if the amount of memory is not available in
|
| 157 |
+
# one chunk - would only be the case in python 2.4, being more likely on
|
| 158 |
+
# 32 bit systems.
|
| 159 |
+
return _RandomAccessBytesIO(b"\0" * size)
|
| 160 |
+
# END handle memory allocation
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def file_contents_ro(fd, stream=False, allow_mmap=True):
|
| 164 |
+
""":return: read-only contents of the file represented by the file descriptor fd
|
| 165 |
+
|
| 166 |
+
:param fd: file descriptor opened for reading
|
| 167 |
+
:param stream: if False, random access is provided, otherwise the stream interface
|
| 168 |
+
is provided.
|
| 169 |
+
:param allow_mmap: if True, its allowed to map the contents into memory, which
|
| 170 |
+
allows large files to be handled and accessed efficiently. The file-descriptor
|
| 171 |
+
will change its position if this is False"""
|
| 172 |
+
try:
|
| 173 |
+
if allow_mmap:
|
| 174 |
+
# supports stream and random access
|
| 175 |
+
try:
|
| 176 |
+
return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
|
| 177 |
+
except OSError:
|
| 178 |
+
# python 2.4 issue, 0 wants to be the actual size
|
| 179 |
+
return mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_READ)
|
| 180 |
+
# END handle python 2.4
|
| 181 |
+
except OSError:
|
| 182 |
+
pass
|
| 183 |
+
# END exception handling
|
| 184 |
+
|
| 185 |
+
# read manually
|
| 186 |
+
contents = os.read(fd, os.fstat(fd).st_size)
|
| 187 |
+
if stream:
|
| 188 |
+
return _RandomAccessBytesIO(contents)
|
| 189 |
+
return contents
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def file_contents_ro_filepath(filepath, stream=False, allow_mmap=True, flags=0):
|
| 193 |
+
"""Get the file contents at filepath as fast as possible
|
| 194 |
+
|
| 195 |
+
:return: random access compatible memory of the given filepath
|
| 196 |
+
:param stream: see ``file_contents_ro``
|
| 197 |
+
:param allow_mmap: see ``file_contents_ro``
|
| 198 |
+
:param flags: additional flags to pass to os.open
|
| 199 |
+
:raise OSError: If the file could not be opened
|
| 200 |
+
|
| 201 |
+
**Note** for now we don't try to use O_NOATIME directly as the right value needs to be
|
| 202 |
+
shared per database in fact. It only makes a real difference for loose object
|
| 203 |
+
databases anyway, and they use it with the help of the ``flags`` parameter"""
|
| 204 |
+
fd = os.open(filepath, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags)
|
| 205 |
+
try:
|
| 206 |
+
return file_contents_ro(fd, stream, allow_mmap)
|
| 207 |
+
finally:
|
| 208 |
+
close(fd)
|
| 209 |
+
# END assure file is closed
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def sliding_ro_buffer(filepath, flags=0):
|
| 213 |
+
"""
|
| 214 |
+
:return: a buffer compatible object which uses our mapped memory manager internally
|
| 215 |
+
ready to read the whole given filepath"""
|
| 216 |
+
return SlidingWindowMapBuffer(mman.make_cursor(filepath), flags=flags)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def to_hex_sha(sha):
|
| 220 |
+
""":return: hexified version of sha"""
|
| 221 |
+
if len(sha) == 40:
|
| 222 |
+
return sha
|
| 223 |
+
return bin_to_hex(sha)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def to_bin_sha(sha):
|
| 227 |
+
if len(sha) == 20:
|
| 228 |
+
return sha
|
| 229 |
+
return hex_to_bin(sha)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
#} END routines
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
#{ Utilities
|
| 236 |
+
|
| 237 |
+
class LazyMixin:
|
| 238 |
+
|
| 239 |
+
"""
|
| 240 |
+
Base class providing an interface to lazily retrieve attribute values upon
|
| 241 |
+
first access. If slots are used, memory will only be reserved once the attribute
|
| 242 |
+
is actually accessed and retrieved the first time. All future accesses will
|
| 243 |
+
return the cached value as stored in the Instance's dict or slot.
|
| 244 |
+
"""
|
| 245 |
+
|
| 246 |
+
__slots__ = tuple()
|
| 247 |
+
|
| 248 |
+
def __getattr__(self, attr):
|
| 249 |
+
"""
|
| 250 |
+
Whenever an attribute is requested that we do not know, we allow it
|
| 251 |
+
to be created and set. Next time the same attribute is requested, it is simply
|
| 252 |
+
returned from our dict/slots. """
|
| 253 |
+
self._set_cache_(attr)
|
| 254 |
+
# will raise in case the cache was not created
|
| 255 |
+
return object.__getattribute__(self, attr)
|
| 256 |
+
|
| 257 |
+
def _set_cache_(self, attr):
|
| 258 |
+
"""
|
| 259 |
+
This method should be overridden in the derived class.
|
| 260 |
+
It should check whether the attribute named by attr can be created
|
| 261 |
+
and cached. Do nothing if you do not know the attribute or call your subclass
|
| 262 |
+
|
| 263 |
+
The derived class may create as many additional attributes as it deems
|
| 264 |
+
necessary in case a git command returns more information than represented
|
| 265 |
+
in the single attribute."""
|
| 266 |
+
pass
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class LockedFD:
|
| 270 |
+
|
| 271 |
+
"""
|
| 272 |
+
This class facilitates a safe read and write operation to a file on disk.
|
| 273 |
+
If we write to 'file', we obtain a lock file at 'file.lock' and write to
|
| 274 |
+
that instead. If we succeed, the lock file will be renamed to overwrite
|
| 275 |
+
the original file.
|
| 276 |
+
|
| 277 |
+
When reading, we obtain a lock file, but to prevent other writers from
|
| 278 |
+
succeeding while we are reading the file.
|
| 279 |
+
|
| 280 |
+
This type handles error correctly in that it will assure a consistent state
|
| 281 |
+
on destruction.
|
| 282 |
+
|
| 283 |
+
**note** with this setup, parallel reading is not possible"""
|
| 284 |
+
__slots__ = ("_filepath", '_fd', '_write')
|
| 285 |
+
|
| 286 |
+
def __init__(self, filepath):
|
| 287 |
+
"""Initialize an instance with the givne filepath"""
|
| 288 |
+
self._filepath = filepath
|
| 289 |
+
self._fd = None
|
| 290 |
+
self._write = None # if True, we write a file
|
| 291 |
+
|
| 292 |
+
def __del__(self):
|
| 293 |
+
# will do nothing if the file descriptor is already closed
|
| 294 |
+
if self._fd is not None:
|
| 295 |
+
self.rollback()
|
| 296 |
+
|
| 297 |
+
def _lockfilepath(self):
|
| 298 |
+
return "%s.lock" % self._filepath
|
| 299 |
+
|
| 300 |
+
def open(self, write=False, stream=False):
|
| 301 |
+
"""
|
| 302 |
+
Open the file descriptor for reading or writing, both in binary mode.
|
| 303 |
+
|
| 304 |
+
:param write: if True, the file descriptor will be opened for writing. Other
|
| 305 |
+
wise it will be opened read-only.
|
| 306 |
+
:param stream: if True, the file descriptor will be wrapped into a simple stream
|
| 307 |
+
object which supports only reading or writing
|
| 308 |
+
:return: fd to read from or write to. It is still maintained by this instance
|
| 309 |
+
and must not be closed directly
|
| 310 |
+
:raise IOError: if the lock could not be retrieved
|
| 311 |
+
:raise OSError: If the actual file could not be opened for reading
|
| 312 |
+
|
| 313 |
+
**note** must only be called once"""
|
| 314 |
+
if self._write is not None:
|
| 315 |
+
raise AssertionError("Called %s multiple times" % self.open)
|
| 316 |
+
|
| 317 |
+
self._write = write
|
| 318 |
+
|
| 319 |
+
# try to open the lock file
|
| 320 |
+
binary = getattr(os, 'O_BINARY', 0)
|
| 321 |
+
lockmode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | binary
|
| 322 |
+
try:
|
| 323 |
+
fd = os.open(self._lockfilepath(), lockmode, int("600", 8))
|
| 324 |
+
if not write:
|
| 325 |
+
os.close(fd)
|
| 326 |
+
else:
|
| 327 |
+
self._fd = fd
|
| 328 |
+
# END handle file descriptor
|
| 329 |
+
except OSError as e:
|
| 330 |
+
raise OSError("Lock at %r could not be obtained" % self._lockfilepath()) from e
|
| 331 |
+
# END handle lock retrieval
|
| 332 |
+
|
| 333 |
+
# open actual file if required
|
| 334 |
+
if self._fd is None:
|
| 335 |
+
# we could specify exclusive here, as we obtained the lock anyway
|
| 336 |
+
try:
|
| 337 |
+
self._fd = os.open(self._filepath, os.O_RDONLY | binary)
|
| 338 |
+
except:
|
| 339 |
+
# assure we release our lockfile
|
| 340 |
+
remove(self._lockfilepath())
|
| 341 |
+
raise
|
| 342 |
+
# END handle lockfile
|
| 343 |
+
# END open descriptor for reading
|
| 344 |
+
|
| 345 |
+
if stream:
|
| 346 |
+
# need delayed import
|
| 347 |
+
from gitdb.stream import FDStream
|
| 348 |
+
return FDStream(self._fd)
|
| 349 |
+
else:
|
| 350 |
+
return self._fd
|
| 351 |
+
# END handle stream
|
| 352 |
+
|
| 353 |
+
def commit(self):
|
| 354 |
+
"""When done writing, call this function to commit your changes into the
|
| 355 |
+
actual file.
|
| 356 |
+
The file descriptor will be closed, and the lockfile handled.
|
| 357 |
+
|
| 358 |
+
**Note** can be called multiple times"""
|
| 359 |
+
self._end_writing(successful=True)
|
| 360 |
+
|
| 361 |
+
def rollback(self):
|
| 362 |
+
"""Abort your operation without any changes. The file descriptor will be
|
| 363 |
+
closed, and the lock released.
|
| 364 |
+
|
| 365 |
+
**Note** can be called multiple times"""
|
| 366 |
+
self._end_writing(successful=False)
|
| 367 |
+
|
| 368 |
+
def _end_writing(self, successful=True):
|
| 369 |
+
"""Handle the lock according to the write mode """
|
| 370 |
+
if self._write is None:
|
| 371 |
+
raise AssertionError("Cannot end operation if it wasn't started yet")
|
| 372 |
+
|
| 373 |
+
if self._fd is None:
|
| 374 |
+
return
|
| 375 |
+
|
| 376 |
+
os.close(self._fd)
|
| 377 |
+
self._fd = None
|
| 378 |
+
|
| 379 |
+
lockfile = self._lockfilepath()
|
| 380 |
+
if self._write and successful:
|
| 381 |
+
# on windows, rename does not silently overwrite the existing one
|
| 382 |
+
if sys.platform == "win32":
|
| 383 |
+
if isfile(self._filepath):
|
| 384 |
+
remove(self._filepath)
|
| 385 |
+
# END remove if exists
|
| 386 |
+
# END win32 special handling
|
| 387 |
+
os.rename(lockfile, self._filepath)
|
| 388 |
+
|
| 389 |
+
# assure others can at least read the file - the tmpfile left it at rw--
|
| 390 |
+
# We may also write that file, on windows that boils down to a remove-
|
| 391 |
+
# protection as well
|
| 392 |
+
chmod(self._filepath, int("644", 8))
|
| 393 |
+
else:
|
| 394 |
+
# just delete the file so far, we failed
|
| 395 |
+
remove(lockfile)
|
| 396 |
+
# END successful handling
|
| 397 |
+
|
| 398 |
+
#} END utilities
|
vllm/lib/python3.10/site-packages/gitdb/utils/encoding.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def force_bytes(data, encoding="utf-8"):
|
| 2 |
+
if isinstance(data, bytes):
|
| 3 |
+
return data
|
| 4 |
+
|
| 5 |
+
if isinstance(data, str):
|
| 6 |
+
return data.encode(encoding)
|
| 7 |
+
|
| 8 |
+
return data
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def force_text(data, encoding="utf-8"):
|
| 12 |
+
if isinstance(data, str):
|
| 13 |
+
return data
|
| 14 |
+
|
| 15 |
+
if isinstance(data, bytes):
|
| 16 |
+
return data.decode(encoding)
|
| 17 |
+
|
| 18 |
+
return str(data, encoding)
|
vllm/lib/python3.10/site-packages/tqdm/__main__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .cli import main
|
| 2 |
+
|
| 3 |
+
main()
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.47 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/__main__.cpython-310.pyc
ADDED
|
Binary file (194 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_dist_ver.cpython-310.pyc
ADDED
|
Binary file (177 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_main.cpython-310.pyc
ADDED
|
Binary file (428 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_monitor.cpython-310.pyc
ADDED
|
Binary file (2.79 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm.cpython-310.pyc
ADDED
|
Binary file (423 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_gui.cpython-310.pyc
ADDED
|
Binary file (436 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_notebook.cpython-310.pyc
ADDED
|
Binary file (456 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-310.pyc
ADDED
|
Binary file (944 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (809 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/asyncio.cpython-310.pyc
ADDED
|
Binary file (3.35 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/auto.cpython-310.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/autonotebook.cpython-310.pyc
ADDED
|
Binary file (990 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/cli.cpython-310.pyc
ADDED
|
Binary file (9.17 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/tqdm/__pycache__/dask.cpython-310.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|