ZTWHHH committed
Commit 973a66f · verified · Parent(s): 4b5844f

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set.
Files changed (50)
  1. .gitattributes +5 -0
  2. valley/lib/python3.10/site-packages/annotated_types/__init__.py +432 -0
  3. valley/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc +0 -0
  4. valley/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc +0 -0
  5. valley/lib/python3.10/site-packages/annotated_types/py.typed +0 -0
  6. valley/lib/python3.10/site-packages/gitdb/__init__.py +38 -0
  7. valley/lib/python3.10/site-packages/gitdb/__pycache__/__init__.cpython-310.pyc +0 -0
  8. valley/lib/python3.10/site-packages/gitdb/__pycache__/base.cpython-310.pyc +0 -0
  9. valley/lib/python3.10/site-packages/gitdb/__pycache__/const.cpython-310.pyc +0 -0
  10. valley/lib/python3.10/site-packages/gitdb/__pycache__/exc.cpython-310.pyc +0 -0
  11. valley/lib/python3.10/site-packages/gitdb/__pycache__/fun.cpython-310.pyc +0 -0
  12. valley/lib/python3.10/site-packages/gitdb/__pycache__/pack.cpython-310.pyc +0 -0
  13. valley/lib/python3.10/site-packages/gitdb/__pycache__/stream.cpython-310.pyc +0 -0
  14. valley/lib/python3.10/site-packages/gitdb/__pycache__/typ.cpython-310.pyc +0 -0
  15. valley/lib/python3.10/site-packages/gitdb/__pycache__/util.cpython-310.pyc +0 -0
  16. valley/lib/python3.10/site-packages/gitdb/db/__init__.py +11 -0
  17. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/__init__.cpython-310.pyc +0 -0
  18. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/base.cpython-310.pyc +0 -0
  19. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc +0 -0
  20. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/loose.cpython-310.pyc +0 -0
  21. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/mem.cpython-310.pyc +0 -0
  22. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc +0 -0
  23. valley/lib/python3.10/site-packages/gitdb/db/__pycache__/ref.cpython-310.pyc +0 -0
  24. valley/lib/python3.10/site-packages/gitdb/db/base.py +278 -0
  25. valley/lib/python3.10/site-packages/gitdb/db/git.py +85 -0
  26. valley/lib/python3.10/site-packages/gitdb/db/loose.py +254 -0
  27. valley/lib/python3.10/site-packages/gitdb/db/mem.py +110 -0
  28. valley/lib/python3.10/site-packages/gitdb/db/pack.py +206 -0
  29. valley/lib/python3.10/site-packages/gitdb/test/__init__.py +4 -0
  30. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/__init__.cpython-310.pyc +0 -0
  31. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc +0 -0
  32. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc +0 -0
  33. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_example.cpython-310.pyc +0 -0
  34. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_pack.cpython-310.pyc +0 -0
  35. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc +0 -0
  36. valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc +0 -0
  37. valley/lib/python3.10/site-packages/gitdb/test/lib.py +192 -0
  38. valley/lib/python3.10/site-packages/gitdb/test/test_base.py +105 -0
  39. valley/lib/python3.10/site-packages/gitdb/test/test_example.py +43 -0
  40. valley/lib/python3.10/site-packages/gitdb/test/test_pack.py +249 -0
  41. valley/lib/python3.10/site-packages/gitdb/test/test_stream.py +164 -0
  42. valley/lib/python3.10/site-packages/gitdb/test/test_util.py +100 -0
  43. valley/lib/python3.10/site-packages/gitdb/utils/__init__.py +0 -0
  44. valley/lib/python3.10/site-packages/gitdb/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  45. valley/lib/python3.10/site-packages/gitdb/utils/__pycache__/encoding.cpython-310.pyc +0 -0
  46. valley/lib/python3.10/site-packages/gitdb/utils/encoding.py +18 -0
  47. valley/lib/python3.10/site-packages/huggingface_hub/__init__.py +968 -0
  48. valley/lib/python3.10/site-packages/huggingface_hub/_commit_scheduler.py +327 -0
  49. valley/lib/python3.10/site-packages/huggingface_hub/_tensorboard_logger.py +195 -0
  50. valley/lib/python3.10/site-packages/huggingface_hub/_webhooks_server.py +386 -0
.gitattributes CHANGED
@@ -588,3 +588,8 @@ valley/bin/x86_64-conda_cos7-linux-gnu-ld filter=lfs diff=lfs merge=lfs -text
 valley/lib/libncursesw.a filter=lfs diff=lfs merge=lfs -text
 valley/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
 valley/lib/libatomic.so filter=lfs diff=lfs merge=lfs -text
+wemm/bin/bzcat filter=lfs diff=lfs merge=lfs -text
+wemm/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
+wemm/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
+wemm/bin/x86_64-conda_cos7-linux-gnu-ld filter=lfs diff=lfs merge=lfs -text
+wemm/bin/bunzip2 filter=lfs diff=lfs merge=lfs -text
valley/lib/python3.10/site-packages/annotated_types/__init__.py ADDED
@@ -0,0 +1,432 @@
+import math
+import sys
+import types
+from dataclasses import dataclass
+from datetime import tzinfo
+from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
+
+if sys.version_info < (3, 8):
+    from typing_extensions import Protocol, runtime_checkable
+else:
+    from typing import Protocol, runtime_checkable
+
+if sys.version_info < (3, 9):
+    from typing_extensions import Annotated, Literal
+else:
+    from typing import Annotated, Literal
+
+if sys.version_info < (3, 10):
+    EllipsisType = type(Ellipsis)
+    KW_ONLY = {}
+    SLOTS = {}
+else:
+    from types import EllipsisType
+
+    KW_ONLY = {"kw_only": True}
+    SLOTS = {"slots": True}
+
+
+__all__ = (
+    'BaseMetadata',
+    'GroupedMetadata',
+    'Gt',
+    'Ge',
+    'Lt',
+    'Le',
+    'Interval',
+    'MultipleOf',
+    'MinLen',
+    'MaxLen',
+    'Len',
+    'Timezone',
+    'Predicate',
+    'LowerCase',
+    'UpperCase',
+    'IsDigits',
+    'IsFinite',
+    'IsNotFinite',
+    'IsNan',
+    'IsNotNan',
+    'IsInfinite',
+    'IsNotInfinite',
+    'doc',
+    'DocInfo',
+    '__version__',
+)
+
+__version__ = '0.7.0'
+
+
+T = TypeVar('T')
+
+
+# arguments that start with __ are considered
+# positional only
+# see https://peps.python.org/pep-0484/#positional-only-arguments
+
+
+class SupportsGt(Protocol):
+    def __gt__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsGe(Protocol):
+    def __ge__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsLt(Protocol):
+    def __lt__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsLe(Protocol):
+    def __le__(self: T, __other: T) -> bool:
+        ...
+
+
+class SupportsMod(Protocol):
+    def __mod__(self: T, __other: T) -> T:
+        ...
+
+
+class SupportsDiv(Protocol):
+    def __div__(self: T, __other: T) -> T:
+        ...
+
+
+class BaseMetadata:
+    """Base class for all metadata.
+
+    This exists mainly so that implementers
+    can do `isinstance(..., BaseMetadata)` while traversing field annotations.
+    """
+
+    __slots__ = ()
+
+
+@dataclass(frozen=True, **SLOTS)
+class Gt(BaseMetadata):
+    """Gt(gt=x) implies that the value must be greater than x.
+
+    It can be used with any type that supports the ``>`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    gt: SupportsGt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Ge(BaseMetadata):
+    """Ge(ge=x) implies that the value must be greater than or equal to x.
+
+    It can be used with any type that supports the ``>=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    ge: SupportsGe
+
+
+@dataclass(frozen=True, **SLOTS)
+class Lt(BaseMetadata):
+    """Lt(lt=x) implies that the value must be less than x.
+
+    It can be used with any type that supports the ``<`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    lt: SupportsLt
+
+
+@dataclass(frozen=True, **SLOTS)
+class Le(BaseMetadata):
+    """Le(le=x) implies that the value must be less than or equal to x.
+
+    It can be used with any type that supports the ``<=`` operator,
+    including numbers, dates and times, strings, sets, and so on.
+    """
+
+    le: SupportsLe
+
+
+@runtime_checkable
+class GroupedMetadata(Protocol):
+    """A grouping of multiple objects, like typing.Unpack.
+
+    `GroupedMetadata` on its own is not metadata and has no meaning.
+    All of the constraints and metadata should be fully expressible
+    in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
+
+    Concrete implementations should override `GroupedMetadata.__iter__()`
+    to add their own metadata.
+    For example:
+
+    >>> @dataclass
+    >>> class Field(GroupedMetadata):
+    >>>     gt: float | None = None
+    >>>     description: str | None = None
+    ...
+    >>>     def __iter__(self) -> Iterable[object]:
+    >>>         if self.gt is not None:
+    >>>             yield Gt(self.gt)
+    >>>         if self.description is not None:
+    >>>             yield Description(self.gt)
+
+    Also see the implementation of `Interval` below for an example.
+
+    Parsers should recognize this and unpack it so that it can be used
+    both with and without unpacking:
+
+    - `Annotated[int, Field(...)]` (parser must unpack Field)
+    - `Annotated[int, *Field(...)]` (PEP-646)
+    """  # noqa: trailing-whitespace
+
+    @property
+    def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
+        return True
+
+    def __iter__(self) -> Iterator[object]:
+        ...
+
+    if not TYPE_CHECKING:
+        __slots__ = ()  # allow subclasses to use slots
+
+        def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
+            # Basic ABC like functionality without the complexity of an ABC
+            super().__init_subclass__(*args, **kwargs)
+            if cls.__iter__ is GroupedMetadata.__iter__:
+                raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
+
+        def __iter__(self) -> Iterator[object]:  # noqa: F811
+            raise NotImplementedError  # more helpful than "None has no attribute..." type errors
+
+
+@dataclass(frozen=True, **KW_ONLY, **SLOTS)
+class Interval(GroupedMetadata):
+    """Interval can express inclusive or exclusive bounds with a single object.
+
+    It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
+    are interpreted the same way as the single-bound constraints.
+    """
+
+    gt: Union[SupportsGt, None] = None
+    ge: Union[SupportsGe, None] = None
+    lt: Union[SupportsLt, None] = None
+    le: Union[SupportsLe, None] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack an Interval into zero or more single-bounds."""
+        if self.gt is not None:
+            yield Gt(self.gt)
+        if self.ge is not None:
+            yield Ge(self.ge)
+        if self.lt is not None:
+            yield Lt(self.lt)
+        if self.le is not None:
+            yield Le(self.le)
+
+
+@dataclass(frozen=True, **SLOTS)
+class MultipleOf(BaseMetadata):
+    """MultipleOf(multiple_of=x) might be interpreted in two ways:
+
+    1. Python semantics, implying ``value % multiple_of == 0``, or
+    2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
+
+    We encourage users to be aware of these two common interpretations,
+    and libraries to carefully document which they implement.
+    """
+
+    multiple_of: Union[SupportsDiv, SupportsMod]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MinLen(BaseMetadata):
+    """
+    MinLen() implies minimum inclusive length,
+    e.g. ``len(value) >= min_length``.
+    """
+
+    min_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class MaxLen(BaseMetadata):
+    """
+    MaxLen() implies maximum inclusive length,
+    e.g. ``len(value) <= max_length``.
+    """
+
+    max_length: Annotated[int, Ge(0)]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Len(GroupedMetadata):
+    """
+    Len() implies that ``min_length <= len(value) <= max_length``.
+
+    Upper bound may be omitted or ``None`` to indicate no upper length bound.
+    """
+
+    min_length: Annotated[int, Ge(0)] = 0
+    max_length: Optional[Annotated[int, Ge(0)]] = None
+
+    def __iter__(self) -> Iterator[BaseMetadata]:
+        """Unpack a Len into zero or more single-bounds."""
+        if self.min_length > 0:
+            yield MinLen(self.min_length)
+        if self.max_length is not None:
+            yield MaxLen(self.max_length)
+
+
+@dataclass(frozen=True, **SLOTS)
+class Timezone(BaseMetadata):
+    """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
+
+    ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
+    ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
+    tz-aware but any timezone is allowed.
+
+    You may also pass a specific timezone string or tzinfo object such as
+    ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
+    you only allow a specific timezone, though we note that this is often
+    a symptom of poor design.
+    """
+
+    tz: Union[str, tzinfo, EllipsisType, None]
+
+
+@dataclass(frozen=True, **SLOTS)
+class Unit(BaseMetadata):
+    """Indicates that the value is a physical quantity with the specified unit.
+
+    It is intended for usage with numeric types, where the value represents the
+    magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
+    or ``speed: Annotated[float, Unit('m/s')]``.
+
+    Interpretation of the unit string is left to the discretion of the consumer.
+    It is suggested to follow conventions established by python libraries that work
+    with physical quantities, such as
+
+    - ``pint`` : <https://pint.readthedocs.io/en/stable/>
+    - ``astropy.units``: <https://docs.astropy.org/en/stable/units/>
+
+    For indicating a quantity with a certain dimensionality but without a specific unit
+    it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
+    Note, however, ``annotated_types`` itself makes no use of the unit string.
+    """
+
+    unit: str
+
+
+@dataclass(frozen=True, **SLOTS)
+class Predicate(BaseMetadata):
+    """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
+
+    Users should prefer statically inspectable metadata, but if you need the full
+    power and flexibility of arbitrary runtime predicates... here it is.
+
+    We provide a few predefined predicates for common string constraints:
+    ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
+    ``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
+    can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
+
+    Some libraries might have special logic to handle certain predicates, e.g. by
+    checking for `str.isdigit` and using its presence to both call custom logic to
+    enforce digit-only strings, and customise some generated external schema.
+
+    We do not specify what behaviour should be expected for predicates that raise
+    an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
+    skip invalid constraints, or statically raise an error; or it might try calling it
+    and then propagate or discard the resulting exception.
+    """
+
+    func: Callable[[Any], bool]
+
+    def __repr__(self) -> str:
+        if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
+            return f"{self.__class__.__name__}({self.func!r})"
+        if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
+            namespace := getattr(self.func.__self__, "__name__", None)
+        ):
+            return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
+        if isinstance(self.func, type(str.isascii)):  # method descriptor
+            return f"{self.__class__.__name__}({self.func.__qualname__})"
+        return f"{self.__class__.__name__}({self.func.__name__})"
+
+
+@dataclass
+class Not:
+    func: Callable[[Any], bool]
+
+    def __call__(self, __v: Any) -> bool:
+        return not self.func(__v)
+
+
+_StrType = TypeVar("_StrType", bound=str)
+
+LowerCase = Annotated[_StrType, Predicate(str.islower)]
+"""
+Return True if the string is a lowercase string, False otherwise.
+
+A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
+"""  # noqa: E501
+UpperCase = Annotated[_StrType, Predicate(str.isupper)]
+"""
+Return True if the string is an uppercase string, False otherwise.
+
+A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
+"""  # noqa: E501
+IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
+IsDigits = IsDigit  # type: ignore  # plural for backwards compatibility, see #63
+"""
+Return True if the string is a digit string, False otherwise.
+
+A string is a digit string if all characters in the string are digits and there is at least one character in the string.
+"""  # noqa: E501
+IsAscii = Annotated[_StrType, Predicate(str.isascii)]
+"""
+Return True if all characters in the string are ASCII, False otherwise.
+
+ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
+"""
+
+_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
+IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
+"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
+IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
+"""Return True if x is one of infinity or NaN, and False otherwise."""
+IsNan = Annotated[_NumericType, Predicate(math.isnan)]
+"""Return True if x is a NaN (not a number), and False otherwise."""
+IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
+"""Return True if x is anything but NaN (not a number), and False otherwise."""
+IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
+"""Return True if x is a positive or negative infinity, and False otherwise."""
+IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
+"""Return True if x is neither a positive nor a negative infinity, and False otherwise."""
+
+try:
+    from typing_extensions import DocInfo, doc  # type: ignore [attr-defined]
+except ImportError:
+
+    @dataclass(frozen=True, **SLOTS)
+    class DocInfo:  # type: ignore [no-redef]
+        """
+        The return value of doc(), mainly to be used by tools that want to extract the
+        Annotated documentation at runtime.
+        """
+
+        documentation: str
+        """The documentation string passed to doc()."""
+
+    def doc(
+        documentation: str,
+    ) -> DocInfo:
+        """
+        Add documentation to a type annotation inside of Annotated.
+
+        For example:
+
+        >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
+        """
+        return DocInfo(documentation)
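
The types above are deliberately inert metadata carriers: annotated_types defines no validation of its own, and enforcement is left to consumers such as validators or schema generators. As a minimal sketch of how a consumer might walk this metadata (the iter_constraints helper is illustrative, not part of the package):

from typing import Annotated, get_args

from annotated_types import BaseMetadata, GroupedMetadata, Interval

# An annotated alias: the int carries no behaviour, the metadata does.
Age = Annotated[int, Interval(gt=0, lt=150)]


def iter_constraints(tp):
    """Yield BaseMetadata items, unpacking GroupedMetadata such as Interval."""
    for meta in get_args(tp)[1:]:
        if isinstance(meta, GroupedMetadata):
            yield from meta  # Interval unpacks itself into Gt/Ge/Lt/Le
        elif isinstance(meta, BaseMetadata):
            yield meta


print(list(iter_constraints(Age)))  # [Gt(gt=0), Lt(lt=150)]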
valley/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.6 kB).

valley/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc ADDED
Binary file (5.62 kB).
 
valley/lib/python3.10/site-packages/annotated_types/py.typed ADDED
File without changes
valley/lib/python3.10/site-packages/gitdb/__init__.py ADDED
@@ -0,0 +1,38 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+"""Initialize the object database module"""
+
+import sys
+import os
+
+#{ Initialization
+
+
+def _init_externals():
+    """Initialize external projects by putting them into the path"""
+    if 'PYOXIDIZER' not in os.environ:
+        where = os.path.join(os.path.dirname(__file__), 'ext', 'smmap')
+        if os.path.exists(where):
+            sys.path.append(where)
+
+        import smmap
+        del smmap
+    # END handle imports
+
+#} END initialization
+
+_init_externals()
+
+__author__ = "Sebastian Thiel"
+__contact__ = "byronimo@gmail.com"
+__homepage__ = "https://github.com/gitpython-developers/gitdb"
+version_info = (4, 0, 11)
+__version__ = '.'.join(str(i) for i in version_info)
+
+
+# default imports
+from gitdb.base import *
+from gitdb.db import *
+from gitdb.stream import *
valley/lib/python3.10/site-packages/gitdb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.01 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/base.cpython-310.pyc ADDED
Binary file (10.4 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/const.cpython-310.pyc ADDED
Binary file (287 Bytes).

valley/lib/python3.10/site-packages/gitdb/__pycache__/exc.cpython-310.pyc ADDED
Binary file (2.17 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/fun.cpython-310.pyc ADDED
Binary file (16.9 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/pack.cpython-310.pyc ADDED
Binary file (30.3 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/stream.cpython-310.pyc ADDED
Binary file (17.9 kB).

valley/lib/python3.10/site-packages/gitdb/__pycache__/typ.cpython-310.pyc ADDED
Binary file (349 Bytes).

valley/lib/python3.10/site-packages/gitdb/__pycache__/util.cpython-310.pyc ADDED
Binary file (11 kB).
 
valley/lib/python3.10/site-packages/gitdb/db/__init__.py ADDED
@@ -0,0 +1,11 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+
+from gitdb.db.base import *
+from gitdb.db.loose import *
+from gitdb.db.mem import *
+from gitdb.db.pack import *
+from gitdb.db.git import *
+from gitdb.db.ref import *
valley/lib/python3.10/site-packages/gitdb/db/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (313 Bytes).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/base.cpython-310.pyc ADDED
Binary file (9.68 kB).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc ADDED
Binary file (2.31 kB).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/loose.cpython-310.pyc ADDED
Binary file (5.71 kB).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/mem.cpython-310.pyc ADDED
Binary file (3.29 kB).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc ADDED
Binary file (6.13 kB).

valley/lib/python3.10/site-packages/gitdb/db/__pycache__/ref.cpython-310.pyc ADDED
Binary file (2.37 kB).
 
valley/lib/python3.10/site-packages/gitdb/db/base.py ADDED
@@ -0,0 +1,278 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+"""Contains implementations of databases for retrieving objects"""
+from gitdb.util import (
+    join,
+    LazyMixin,
+    hex_to_bin
+)
+
+from gitdb.utils.encoding import force_text
+from gitdb.exc import (
+    BadObject,
+    AmbiguousObjectName
+)
+
+from itertools import chain
+from functools import reduce
+
+
+__all__ = ('ObjectDBR', 'ObjectDBW', 'FileDBBase', 'CompoundDB', 'CachingDB')
+
+
+class ObjectDBR:
+
+    """Defines an interface for object database lookup.
+    Objects are identified by their 20 byte binary sha"""
+
+    def __contains__(self, sha):
+        return self.has_object(sha)
+
+    #{ Query Interface
+    def has_object(self, sha):
+        """
+        Whether the object identified by the given 20 byte
+        binary sha is contained in the database
+
+        :return: True if the object identified by the given 20 byte
+            binary sha is contained in the database"""
+        raise NotImplementedError("To be implemented in subclass")
+
+    def info(self, sha):
+        """ :return: OInfo instance
+        :param sha: bytes binary sha
+        :raise BadObject:"""
+        raise NotImplementedError("To be implemented in subclass")
+
+    def stream(self, sha):
+        """:return: OStream instance
+        :param sha: 20 bytes binary sha
+        :raise BadObject:"""
+        raise NotImplementedError("To be implemented in subclass")
+
+    def size(self):
+        """:return: amount of objects in this database"""
+        raise NotImplementedError()
+
+    def sha_iter(self):
+        """Return iterator yielding 20 byte shas for all objects in this data base"""
+        raise NotImplementedError()
+
+    #} END query interface
+
+
+class ObjectDBW:
+
+    """Defines an interface to create objects in the database"""
+
+    def __init__(self, *args, **kwargs):
+        self._ostream = None
+
+    #{ Edit Interface
+    def set_ostream(self, stream):
+        """
+        Adjusts the stream to which all data should be sent when storing new objects
+
+        :param stream: if not None, the stream to use, if None the default stream
+            will be used.
+        :return: previously installed stream, or None if there was no override
+        :raise TypeError: if the stream doesn't have the supported functionality"""
+        cstream = self._ostream
+        self._ostream = stream
+        return cstream
+
+    def ostream(self):
+        """
+        Return the output stream
+
+        :return: overridden output stream this instance will write to, or None
+            if it will write to the default stream"""
+        return self._ostream
+
+    def store(self, istream):
+        """
+        Create a new object in the database
+        :return: the input istream object with its sha set to its corresponding value
+
+        :param istream: IStream compatible instance. If its sha is already set
+            to a value, the object will just be stored in our database format,
+            in which case the input stream is expected to be in object format (header + contents).
+        :raise IOError: if data could not be written"""
+        raise NotImplementedError("To be implemented in subclass")
+
+    #} END edit interface
+
+
+class FileDBBase:
+
+    """Provides basic facilities to retrieve files of interest, including
+    caching facilities to help mapping hexshas to objects"""
+
+    def __init__(self, root_path):
+        """Initialize this instance to look for its files at the given root path
+        All subsequent operations will be relative to this path
+        :raise InvalidDBRoot:
+        **Note:** The base will not perform any accessibility checking as the base
+        might not yet be accessible, but become accessible before the first
+        access."""
+        super().__init__()
+        self._root_path = root_path
+
+    #{ Interface
+    def root_path(self):
+        """:return: path at which this db operates"""
+        return self._root_path
+
+    def db_path(self, rela_path):
+        """
+        :return: the given relative path relative to our database root, allowing
+            to potentially access datafiles"""
+        return join(self._root_path, force_text(rela_path))
+    #} END interface
+
+
+class CachingDB:
+
+    """A database which uses caches to speed-up access"""
+
+    #{ Interface
+    def update_cache(self, force=False):
+        """
+        Call this method if the underlying data changed to trigger an update
+        of the internal caching structures.
+
+        :param force: if True, the update must be performed. Otherwise the implementation
+            may decide not to perform an update if it thinks nothing has changed.
+        :return: True if an update was performed as something indeed changed"""
+
+    # END interface
+
+
+def _databases_recursive(database, output):
+    """Fill output list with databases from db, in order. Deals with Loose, Packed
+    and compound databases."""
+    if isinstance(database, CompoundDB):
+        dbs = database.databases()
+        output.extend(db for db in dbs if not isinstance(db, CompoundDB))
+        for cdb in (db for db in dbs if isinstance(db, CompoundDB)):
+            _databases_recursive(cdb, output)
+    else:
+        output.append(database)
+    # END handle database type
+
+
+class CompoundDB(ObjectDBR, LazyMixin, CachingDB):
+
+    """A database which delegates calls to sub-databases.
+
+    Databases are stored in the lazy-loaded _dbs attribute.
+    Define _set_cache_ to update it with your databases"""
+
+    def _set_cache_(self, attr):
+        if attr == '_dbs':
+            self._dbs = list()
+        elif attr == '_db_cache':
+            self._db_cache = dict()
+        else:
+            super()._set_cache_(attr)
+
+    def _db_query(self, sha):
+        """:return: database containing the given 20 byte sha
+        :raise BadObject:"""
+        # most databases use binary representations, prevent converting
+        # it every time a database is being queried
+        try:
+            return self._db_cache[sha]
+        except KeyError:
+            pass
+        # END first level cache
+
+        for db in self._dbs:
+            if db.has_object(sha):
+                self._db_cache[sha] = db
+                return db
+        # END for each database
+        raise BadObject(sha)
+
+    #{ ObjectDBR interface
+
+    def has_object(self, sha):
+        try:
+            self._db_query(sha)
+            return True
+        except BadObject:
+            return False
+        # END handle exceptions
+
+    def info(self, sha):
+        return self._db_query(sha).info(sha)
+
+    def stream(self, sha):
+        return self._db_query(sha).stream(sha)
+
+    def size(self):
+        """:return: total size of all contained databases"""
+        return reduce(lambda x, y: x + y, (db.size() for db in self._dbs), 0)
+
+    def sha_iter(self):
+        return chain(*(db.sha_iter() for db in self._dbs))
+
+    #} END object DBR Interface
+
+    #{ Interface
+
+    def databases(self):
+        """:return: tuple of database instances we use for lookups"""
+        return tuple(self._dbs)
+
+    def update_cache(self, force=False):
+        # something might have changed, clear everything
+        self._db_cache.clear()
+        stat = False
+        for db in self._dbs:
+            if isinstance(db, CachingDB):
+                stat |= db.update_cache(force)
+            # END if is caching db
+        # END for each database to update
+        return stat
+
+    def partial_to_complete_sha_hex(self, partial_hexsha):
+        """
+        :return: 20 byte binary sha1 from the given less-than-40 character hexsha (bytes or str)
+        :param partial_hexsha: hexsha with less than 40 characters
+        :raise AmbiguousObjectName: """
+        databases = list()
+        _databases_recursive(self, databases)
+        partial_hexsha = force_text(partial_hexsha)
+        len_partial_hexsha = len(partial_hexsha)
+        if len_partial_hexsha % 2 != 0:
+            partial_binsha = hex_to_bin(partial_hexsha + "0")
+        else:
+            partial_binsha = hex_to_bin(partial_hexsha)
+        # END assure successful binary conversion
+
+        candidate = None
+        for db in databases:
+            full_bin_sha = None
+            try:
+                if hasattr(db, 'partial_to_complete_sha_hex'):
+                    full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
+                else:
+                    full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
+                # END handle database type
+            except BadObject:
+                continue
+            # END ignore bad objects
+            if full_bin_sha:
+                if candidate and candidate != full_bin_sha:
+                    raise AmbiguousObjectName(partial_hexsha)
+                candidate = full_bin_sha
+            # END handle candidate
+        # END for each db
+        if not candidate:
+            raise BadObject(partial_binsha)
+        return candidate
+
+    #} END interface
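
One detail worth calling out in partial_to_complete_sha_hex above: a hex-to-binary conversion needs an even number of digits, so an odd-length prefix is padded with a trailing "0" while the true prefix length travels alongside, letting sub-databases match on the original prefix. A self-contained sketch of that trick, with binascii standing in for gitdb.util.hex_to_bin (a substitution made here only for self-containment):

from binascii import a2b_hex

partial_hexsha = "4b584"                  # 5 hex digits: not a whole number of bytes
padded = partial_hexsha + "0"             # pad to even length so conversion succeeds
partial_binsha = a2b_hex(padded)          # 3 bytes; the last nibble is padding
len_partial_hexsha = len(partial_hexsha)  # 5: distinguishes "4b584" from "4b5840"
print(partial_binsha.hex(), len_partial_hexsha)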
valley/lib/python3.10/site-packages/gitdb/db/git.py ADDED
@@ -0,0 +1,85 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+from gitdb.db.base import (
+    CompoundDB,
+    ObjectDBW,
+    FileDBBase
+)
+
+from gitdb.db.loose import LooseObjectDB
+from gitdb.db.pack import PackedDB
+from gitdb.db.ref import ReferenceDB
+
+from gitdb.exc import InvalidDBRoot
+
+import os
+
+__all__ = ('GitDB', )
+
+
+class GitDB(FileDBBase, ObjectDBW, CompoundDB):
+
+    """A git-style object database, which contains all objects in the 'objects'
+    subdirectory
+
+    ``IMPORTANT``: The usage of this implementation is highly discouraged as it fails to release file-handles.
+    This can be a problem with long-running processes and/or big repositories.
+    """
+    # Configuration
+    PackDBCls = PackedDB
+    LooseDBCls = LooseObjectDB
+    ReferenceDBCls = ReferenceDB
+
+    # Directories
+    packs_dir = 'pack'
+    loose_dir = ''
+    alternates_dir = os.path.join('info', 'alternates')
+
+    def __init__(self, root_path):
+        """Initialize ourselves on a git objects directory"""
+        super().__init__(root_path)
+
+    def _set_cache_(self, attr):
+        if attr == '_dbs' or attr == '_loose_db':
+            self._dbs = list()
+            loose_db = None
+            for subpath, dbcls in ((self.packs_dir, self.PackDBCls),
+                                   (self.loose_dir, self.LooseDBCls),
+                                   (self.alternates_dir, self.ReferenceDBCls)):
+                path = self.db_path(subpath)
+                if os.path.exists(path):
+                    self._dbs.append(dbcls(path))
+                    if dbcls is self.LooseDBCls:
+                        loose_db = self._dbs[-1]
+                    # END remember loose db
+                # END check path exists
+            # END for each db type
+
+            # should have at least one subdb
+            if not self._dbs:
+                raise InvalidDBRoot(self.root_path())
+            # END handle error
+
+            # the first database should have the store method
+            assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"
+
+            # finally set the value
+            self._loose_db = loose_db
+        else:
+            super()._set_cache_(attr)
+        # END handle attrs
+
+    #{ ObjectDBW interface
+
+    def store(self, istream):
+        return self._loose_db.store(istream)
+
+    def ostream(self):
+        return self._loose_db.ostream()
+
+    def set_ostream(self, ostream):
+        return self._loose_db.set_ostream(ostream)
+
+    #} END objectdbw interface
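
A hypothetical usage sketch for GitDB follows; the repository path and sha prefix are placeholders, not values taken from this commit:

from gitdb import GitDB
from gitdb.util import bin_to_hex

gdb = GitDB("/path/to/repo/.git/objects")           # placeholder path
binsha = gdb.partial_to_complete_sha_hex("4b5844")  # placeholder prefix
info = gdb.info(binsha)                             # OInfo: type and size, no payload read
stream = gdb.stream(binsha)                         # OStream adds a read()able, decompressed body
print(bin_to_hex(binsha), info.type, info.size)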
valley/lib/python3.10/site-packages/gitdb/db/loose.py ADDED
@@ -0,0 +1,254 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+from gitdb.db.base import (
+    FileDBBase,
+    ObjectDBR,
+    ObjectDBW
+)
+
+from gitdb.exc import (
+    BadObject,
+    AmbiguousObjectName
+)
+
+from gitdb.stream import (
+    DecompressMemMapReader,
+    FDCompressedSha1Writer,
+    FDStream,
+    Sha1Writer
+)
+
+from gitdb.base import (
+    OStream,
+    OInfo
+)
+
+from gitdb.util import (
+    file_contents_ro_filepath,
+    ENOENT,
+    hex_to_bin,
+    bin_to_hex,
+    exists,
+    chmod,
+    isfile,
+    remove,
+    rename,
+    dirname,
+    basename,
+    join
+)
+
+from gitdb.fun import (
+    chunk_size,
+    loose_object_header_info,
+    write_object,
+    stream_copy
+)
+
+from gitdb.utils.encoding import force_bytes
+
+import tempfile
+import os
+import sys
+
+
+__all__ = ('LooseObjectDB', )
+
+
+class LooseObjectDB(FileDBBase, ObjectDBR, ObjectDBW):
+
+    """A database which operates on loose object files"""
+
+    # CONFIGURATION
+    # chunks in which data will be copied between streams
+    stream_chunk_size = chunk_size
+
+    # On windows we need to keep it writable, otherwise it cannot be removed
+    # either
+    new_objects_mode = int("444", 8)
+    if os.name == 'nt':
+        new_objects_mode = int("644", 8)
+
+    def __init__(self, root_path):
+        super().__init__(root_path)
+        self._hexsha_to_file = dict()
+        # Additional Flags - might be set to 0 after the first failure
+        # Depending on the root, this might work for some mounts, for others not, which
+        # is why it is per instance
+        self._fd_open_flags = getattr(os, 'O_NOATIME', 0)
+
+    #{ Interface
+    def object_path(self, hexsha):
+        """
+        :return: path at which the object with the given hexsha would be stored,
+            relative to the database root"""
+        return join(hexsha[:2], hexsha[2:])
+
+    def readable_db_object_path(self, hexsha):
+        """
+        :return: readable object path to the object identified by hexsha
+        :raise BadObject: If the object file does not exist"""
+        try:
+            return self._hexsha_to_file[hexsha]
+        except KeyError:
+            pass
+        # END ignore cache misses
+
+        # try filesystem
+        path = self.db_path(self.object_path(hexsha))
+        if exists(path):
+            self._hexsha_to_file[hexsha] = path
+            return path
+        # END handle cache
+        raise BadObject(hexsha)
+
+    def partial_to_complete_sha_hex(self, partial_hexsha):
+        """:return: 20 byte binary sha1 string which matches the given name uniquely
+        :param partial_hexsha: hexadecimal partial name (bytes or ascii string)
+        :raise AmbiguousObjectName:
+        :raise BadObject: """
+        candidate = None
+        for binsha in self.sha_iter():
+            if bin_to_hex(binsha).startswith(force_bytes(partial_hexsha)):
+                # it can't ever find the same object twice
+                if candidate is not None:
+                    raise AmbiguousObjectName(partial_hexsha)
+                candidate = binsha
+        # END for each object
+        if candidate is None:
+            raise BadObject(partial_hexsha)
+        return candidate
+
+    #} END interface
+
+    def _map_loose_object(self, sha):
+        """
+        :return: memory map of that file to allow random read access
+        :raise BadObject: if object could not be located"""
+        db_path = self.db_path(self.object_path(bin_to_hex(sha)))
+        try:
+            return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
+        except OSError as e:
+            if e.errno != ENOENT:
+                # try again without noatime
+                try:
+                    return file_contents_ro_filepath(db_path)
+                except OSError as new_e:
+                    raise BadObject(sha) from new_e
+                # didn't work because of our flag, don't try it again
+                self._fd_open_flags = 0
+            else:
+                raise BadObject(sha) from e
+            # END handle error
+        # END exception handling
+
+    def set_ostream(self, stream):
+        """:raise TypeError: if the stream does not support the Sha1Writer interface"""
+        if stream is not None and not isinstance(stream, Sha1Writer):
+            raise TypeError("Output stream must support the %s interface" % Sha1Writer.__name__)
+        return super().set_ostream(stream)
+
+    def info(self, sha):
+        m = self._map_loose_object(sha)
+        try:
+            typ, size = loose_object_header_info(m)
+            return OInfo(sha, typ, size)
+        finally:
+            if hasattr(m, 'close'):
+                m.close()
+        # END assure release of system resources
+
+    def stream(self, sha):
+        m = self._map_loose_object(sha)
+        type, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
+        return OStream(sha, type, size, stream)
+
+    def has_object(self, sha):
+        try:
+            self.readable_db_object_path(bin_to_hex(sha))
+            return True
+        except BadObject:
+            return False
+        # END check existence
+
+    def store(self, istream):
+        """note: The sha we produce will be hex by nature"""
+        tmp_path = None
+        writer = self.ostream()
+        if writer is None:
+            # open a tmp file to write the data to
+            fd, tmp_path = tempfile.mkstemp(prefix='obj', dir=self._root_path)
+
+            if istream.binsha is None:
+                writer = FDCompressedSha1Writer(fd)
+            else:
+                writer = FDStream(fd)
+            # END handle direct stream copies
+        # END handle custom writer
+
+        try:
+            try:
+                if istream.binsha is not None:
+                    # copy as much as possible, the actual uncompressed item size might
+                    # be smaller than the compressed version
+                    stream_copy(istream.read, writer.write, sys.maxsize, self.stream_chunk_size)
+                else:
+                    # write object with header, we have to make a new one
+                    write_object(istream.type, istream.size, istream.read, writer.write,
+                                 chunk_size=self.stream_chunk_size)
+                # END handle direct stream copies
+            finally:
+                if tmp_path:
+                    writer.close()
+            # END assure target stream is closed
+        except:
+            if tmp_path:
+                os.remove(tmp_path)
+            raise
+        # END assure tmpfile removal on error
+
+        hexsha = None
+        if istream.binsha:
+            hexsha = istream.hexsha
+        else:
+            hexsha = writer.sha(as_hex=True)
+        # END handle sha
+
+        if tmp_path:
+            obj_path = self.db_path(self.object_path(hexsha))
+            obj_dir = dirname(obj_path)
+            os.makedirs(obj_dir, exist_ok=True)
+            # END handle destination directory
+            # rename onto existing doesn't work on NTFS
+            if isfile(obj_path):
+                remove(tmp_path)
+            else:
+                rename(tmp_path, obj_path)
+            # end rename only if needed
+
+            # make sure it's readable for all! It started out as an rw------- tmp file
+            # but needs to be rw-r--r--
+            chmod(obj_path, self.new_objects_mode)
+        # END handle dry_run
+
+        istream.binsha = hex_to_bin(hexsha)
+        return istream
+
+    def sha_iter(self):
+        # find all files which look like an object, extract sha from there
+        for root, dirs, files in os.walk(self.root_path()):
+            root_base = basename(root)
+            if len(root_base) != 2:
+                continue
+
+            for f in files:
+                if len(f) != 38:
+                    continue
+                yield hex_to_bin(root_base + f)
+            # END for each file
+        # END for each walk iteration
+
+    def size(self):
+        return len(tuple(self.sha_iter()))
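
For orientation, the on-disk format LooseObjectDB handles is the standard git loose-object layout: a zlib-deflated "<type> <size>\0" header plus payload, named by the SHA-1 of the uncompressed bytes and sharded into a two-character directory, which is exactly what object_path() and sha_iter() assume. A minimal sketch using only the standard library:

import hashlib
import zlib

payload = b"hello gitdb\n"
raw = b"blob " + str(len(payload)).encode() + b"\x00" + payload  # header + body

hexsha = hashlib.sha1(raw).hexdigest()   # object name: sha1 of the uncompressed bytes
relpath = hexsha[:2] + "/" + hexsha[2:]  # the split object_path() computes
compressed = zlib.compress(raw)          # what actually lands on disk

print(hexsha, relpath, len(compressed))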
valley/lib/python3.10/site-packages/gitdb/db/mem.py ADDED
@@ -0,0 +1,110 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+"""Contains the MemoryDatabase implementation"""
+from gitdb.db.loose import LooseObjectDB
+from gitdb.db.base import (
+    ObjectDBR,
+    ObjectDBW
+)
+
+from gitdb.base import (
+    OStream,
+    IStream,
+)
+
+from gitdb.exc import (
+    BadObject,
+    UnsupportedOperation
+)
+
+from gitdb.stream import (
+    ZippedStoreShaWriter,
+    DecompressMemMapReader,
+)
+
+from io import BytesIO
+
+__all__ = ("MemoryDB", )
+
+
+class MemoryDB(ObjectDBR, ObjectDBW):
+
+    """A memory database stores everything to memory, providing fast IO and object
+    retrieval. It should be used to buffer results and obtain SHAs before writing
+    them to the actual physical storage, as it allows querying whether an object
+    already exists in the target storage before causing actual IO"""
+
+    def __init__(self):
+        super().__init__()
+        self._db = LooseObjectDB("path/doesnt/matter")
+
+        # maps 20 byte shas to their OStream objects
+        self._cache = dict()
+
+    def set_ostream(self, stream):
+        raise UnsupportedOperation("MemoryDBs always stream into memory")
+
+    def store(self, istream):
+        zstream = ZippedStoreShaWriter()
+        self._db.set_ostream(zstream)
+
+        istream = self._db.store(istream)
+        zstream.close()  # close to flush
+        zstream.seek(0)
+
+        # don't provide a size, the stream is written in object format, hence the
+        # header needs decompression
+        decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
+        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)
+
+        return istream
+
+    def has_object(self, sha):
+        return sha in self._cache
+
+    def info(self, sha):
+        # we always return streams, which are infos as well
+        return self.stream(sha)
+
+    def stream(self, sha):
+        try:
+            ostream = self._cache[sha]
+            # rewind stream for the next one to read
+            ostream.stream.seek(0)
+            return ostream
+        except KeyError as e:
+            raise BadObject(sha) from e
+        # END exception handling
+
+    def size(self):
+        return len(self._cache)
+
+    def sha_iter(self):
+        return self._cache.keys()
+
+    #{ Interface
+    def stream_copy(self, sha_iter, odb):
+        """Copy the streams as identified by shas yielded by sha_iter into the given odb
+        The streams will be copied directly
+        **Note:** the object will only be written if it did not exist in the target db
+
+        :return: amount of streams actually copied into odb. If smaller than the amount
+            of input shas, one or more objects did already exist in odb"""
+        count = 0
+        for sha in sha_iter:
+            if odb.has_object(sha):
+                continue
+            # END check object existence
+
+            ostream = self.stream(sha)
+            # compressed data including header
+            sio = BytesIO(ostream.stream.data())
+            istream = IStream(ostream.type, ostream.size, sio, sha)
+
+            odb.store(istream)
+            count += 1
+        # END for each sha
+        return count
+    #} END interface
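
A hypothetical sketch of the buffering pattern the MemoryDB docstring describes: stage an object in memory, then copy only what the target database is missing. The target directory is a placeholder and must already exist for LooseObjectDB to write into it:

from io import BytesIO

from gitdb import IStream
from gitdb.db import LooseObjectDB, MemoryDB

mdb = MemoryDB()
data = b"some blob content"
istream = mdb.store(IStream(b"blob", len(data), BytesIO(data)))  # binsha computed here

ldb = LooseObjectDB("/tmp/odb")  # placeholder path, assumed to exist
copied = mdb.stream_copy([istream.binsha], ldb)
print(copied)                    # 1 on the first run, 0 once the object exists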
valley/lib/python3.10/site-packages/gitdb/db/pack.py ADDED
@@ -0,0 +1,206 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
+"""Module containing a database to deal with packs"""
+from gitdb.db.base import (
+    FileDBBase,
+    ObjectDBR,
+    CachingDB
+)
+
+from gitdb.util import LazyMixin
+
+from gitdb.exc import (
+    BadObject,
+    UnsupportedOperation,
+    AmbiguousObjectName
+)
+
+from gitdb.pack import PackEntity
+
+from functools import reduce
+
+import os
+import glob
+
+__all__ = ('PackedDB', )
+
+#{ Utilities
+
+
+class PackedDB(FileDBBase, ObjectDBR, CachingDB, LazyMixin):
+
+    """A database operating on a set of object packs"""
+
+    # sort the priority list every N queries
+    # Higher values are better, performance tests don't show this has
+    # any effect, but it should have one
+    _sort_interval = 500
+
+    def __init__(self, root_path):
+        super().__init__(root_path)
+        # list of lists with three items:
+        # * hits - number of times the pack was hit with a request
+        # * entity - Pack entity instance
+        # * sha_to_index - PackIndexFile.sha_to_index method for direct cache query
+        # self._entities = list()      # lazy loaded list
+        self._hit_count = 0             # amount of hits
+        self._st_mtime = 0              # last modification date of our root path
+
+    def _set_cache_(self, attr):
+        if attr == '_entities':
+            self._entities = list()
+            self.update_cache(force=True)
+        # END handle entities initialization
+
+    def _sort_entities(self):
+        self._entities.sort(key=lambda l: l[0], reverse=True)
+
+    def _pack_info(self, sha):
+        """:return: tuple(entity, index) for an item at the given sha
+        :param sha: 20 or 40 byte sha
+        :raise BadObject:
+        **Note:** This method is not thread-safe, but may be hit in multi-threaded
+            operation. The worst thing that can happen though is a counter that
+            was not incremented, or the list being in the wrong order. So we save
+            the time for locking here, let's see how that goes"""
+        # presort ?
+        if self._hit_count % self._sort_interval == 0:
+            self._sort_entities()
+        # END update sorting
+
+        for item in self._entities:
+            index = item[2](sha)
+            if index is not None:
+                item[0] += 1            # one hit for you
+                self._hit_count += 1    # general hit count
+                return (item[1], index)
+            # END index found in pack
+        # END for each item
+
+        # no hit, see whether we have to update packs
+        # NOTE: considering packs don't change very often, we save this call
+        # and leave it to the super-caller to trigger that
+        raise BadObject(sha)
+
+    #{ Object DB Read
+
+    def has_object(self, sha):
+        try:
+            self._pack_info(sha)
+            return True
+        except BadObject:
+            return False
+        # END exception handling
+
+    def info(self, sha):
+        entity, index = self._pack_info(sha)
+        return entity.info_at_index(index)
+
+    def stream(self, sha):
+        entity, index = self._pack_info(sha)
+        return entity.stream_at_index(index)
+
+    def sha_iter(self):
+        for entity in self.entities():
+            index = entity.index()
+            sha_by_index = index.sha
+            for index in range(index.size()):
+                yield sha_by_index(index)
+            # END for each index
+        # END for each entity
+
+    def size(self):
+        sizes = [item[1].index().size() for item in self._entities]
+        return reduce(lambda x, y: x + y, sizes, 0)
+
+    #} END object db read
+
+    #{ object db write
+
+    def store(self, istream):
+        """Storing individual objects is not feasible as a pack is designed to
+        hold multiple objects. Writing or rewriting packs for single objects is
+        inefficient"""
+        raise UnsupportedOperation()
+
+    #} END object db write
+
+    #{ Interface
+
+    def update_cache(self, force=False):
+        """
+        Update our cache with the actually existing packs on disk. Add new ones,
+        and remove deleted ones. We keep the unchanged ones
+
+        :param force: If True, the cache will be updated even though the directory
+            does not appear to have changed according to its modification timestamp.
+        :return: True if the packs have been updated so there is new information,
+            False if there was no change to the pack database"""
+        stat = os.stat(self.root_path())
+        if not force and stat.st_mtime <= self._st_mtime:
+            return False
+        # END abort early on no change
+        self._st_mtime = stat.st_mtime
+
+        # packs are supposed to be prefixed with pack- by git-convention
+        # get all pack files, figure out what changed
+        pack_files = set(glob.glob(os.path.join(self.root_path(), "pack-*.pack")))
+        our_pack_files = {item[1].pack().path() for item in self._entities}
+
+        # new packs
+        for pack_file in (pack_files - our_pack_files):
+            # init the hit-counter/priority with the size, a good measure for hit-
+            # probability. It's implemented so that only 12 bytes will be read
+            entity = PackEntity(pack_file)
+            self._entities.append([entity.pack().size(), entity, entity.index().sha_to_index])
+        # END for each new packfile
+
+        # removed packs
+        for pack_file in (our_pack_files - pack_files):
+            del_index = -1
+            for i, item in enumerate(self._entities):
+                if item[1].pack().path() == pack_file:
+                    del_index = i
+                    break
+                # END found index
+            # END for each entity
+            assert del_index != -1
+            del(self._entities[del_index])
+        # END for each removed pack
+
+        # reinitialize priorities
+        self._sort_entities()
+        return True
+
+    def entities(self):
+        """:return: list of pack entities operated upon by this database"""
+        return [item[1] for item in self._entities]
+
+    def partial_to_complete_sha(self, partial_binsha, canonical_length):
+        """:return: 20 byte sha as inferred by the given partial binary sha
+        :param partial_binsha: binary sha with less than 20 bytes
+        :param canonical_length: length of the corresponding canonical representation.
+            It is required as binary shas cannot display whether the original hex sha
+            had an odd or even number of characters
+        :raise AmbiguousObjectName:
+        :raise BadObject: """
+        candidate = None
+        for item in self._entities:
+            item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
+            if item_index is not None:
+                sha = item[1].index().sha(item_index)
+                if candidate and candidate != sha:
+                    raise AmbiguousObjectName(partial_binsha)
+                candidate = sha
+            # END handle full sha could be found
+        # END for each entity
+
+        if candidate:
+            return candidate
+
+        # still not found ?
+        raise BadObject(partial_binsha)
+
+    #} END interface
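
A hypothetical usage sketch for PackedDB; the pack directory and the hex sha are placeholders:

from gitdb.db import PackedDB
from gitdb.util import hex_to_bin

pdb = PackedDB("/path/to/repo/.git/objects/pack")  # placeholder path
pdb.update_cache(force=True)   # scan pack-*.pack files on disk
binsha = hex_to_bin("4" * 40)  # placeholder 40-character hex sha
print(pdb.has_object(binsha))  # False unless that object really exists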
valley/lib/python3.10/site-packages/gitdb/test/__init__.py ADDED
@@ -0,0 +1,4 @@
+# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
+#
+# This module is part of GitDB and is released under
+# the New BSD License: https://opensource.org/license/bsd-3-clause/
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc ADDED
Binary file (5.62 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (1.95 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_example.cpython-310.pyc ADDED
Binary file (1.18 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_pack.cpython-310.pyc ADDED
Binary file (5.79 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc ADDED
Binary file (4.1 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (2.15 kB).
 
valley/lib/python3.10/site-packages/gitdb/test/lib.py ADDED
@@ -0,0 +1,192 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Utilities used in ODB testing"""
6
+ from gitdb import OStream
7
+
8
+ import sys
9
+ import random
10
+ from array import array
11
+
12
+ from io import BytesIO
13
+
14
+ import glob
15
+ import unittest
16
+ import tempfile
17
+ import shutil
18
+ import os
19
+ import gc
20
+ import logging
21
+ from functools import wraps
22
+
23
+
24
+ #{ Bases
25
+
26
+ class TestBase(unittest.TestCase):
27
+ """Base class for all tests
28
+
29
+ TestCase providing access to readonly repositories using the following member variables.
30
+
31
+ * gitrepopath
32
+
33
+ * read-only base path of the git source repository, i.e. .../git/.git
34
+ """
35
+
36
+ #{ Invariants
37
+ k_env_git_repo = "GITDB_TEST_GIT_REPO_BASE"
38
+ #} END invariants
39
+
40
+ @classmethod
41
+ def setUpClass(cls):
42
+ try:
43
+ super().setUpClass()
44
+ except AttributeError:
45
+ pass
46
+
47
+ cls.gitrepopath = os.environ.get(cls.k_env_git_repo)
48
+ if not cls.gitrepopath:
49
+ logging.info(
50
+ "You can set the %s environment variable to a .git repository of your choice - defaulting to the gitdb repository", cls.k_env_git_repo)
51
+ ospd = os.path.dirname
52
+ cls.gitrepopath = os.path.join(ospd(ospd(ospd(__file__))), '.git')
53
+ # end assure gitrepo is set
54
+ assert cls.gitrepopath.endswith('.git')
55
+
56
+
57
+ #} END bases
58
+
59
+ #{ Decorators
60
+
61
+ def with_rw_directory(func):
62
+ """Create a temporary directory which can be written to, remove it if the
63
+ test succeeds, but leave it otherwise to aid additional debugging"""
64
+
65
+ def wrapper(self):
66
+ path = tempfile.mktemp(prefix=func.__name__)
67
+ os.mkdir(path)
68
+ keep = False
69
+ try:
70
+ try:
71
+ return func(self, path)
72
+ except Exception:
73
+ sys.stderr.write(f"Test {type(self).__name__}.{func.__name__} failed, output is at {path!r}\n")
74
+ keep = True
75
+ raise
76
+ finally:
77
+ # Need to collect here to be sure all handles have been closed. It appears
78
+ # to be a Windows-only issue. In fact things should be deleted, as well as
79
+ # memory maps closed, once objects go out of scope. For some reason
80
+ # though this is not the case here unless we collect explicitly.
81
+ if not keep:
82
+ gc.collect()
83
+ shutil.rmtree(path)
84
+ # END handle exception
85
+ # END wrapper
86
+
87
+ wrapper.__name__ = func.__name__
88
+ return wrapper
89
+
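A test method decorated this way receives the writable path as its second argument, matching the usage in test_pack.py below; a minimal sketch:

    import os

    class MyTest(TestBase):

        @with_rw_directory
        def test_something(self, rw_dir):
            # rw_dir is a fresh temporary directory; it is removed on success
            # and kept for inspection if the test raises
            with open(os.path.join(rw_dir, "scratch.txt"), "wb") as fp:
                fp.write(b"data")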
90
+
91
+ def with_packs_rw(func):
92
+ """Function that provides a path into which the packs for testing should be
93
+ copied. Will pass on the path to the actual function afterwards"""
94
+
95
+ def wrapper(self, path):
96
+ src_pack_glob = fixture_path('packs/*')
97
+ copy_files_globbed(src_pack_glob, path, hard_link_ok=True)
98
+ return func(self, path)
99
+ # END wrapper
100
+
101
+ wrapper.__name__ = func.__name__
102
+ return wrapper
103
+
104
+ #} END decorators
105
+
106
+ #{ Routines
107
+
108
+
109
+ def fixture_path(relapath=''):
110
+ """:return: absolute path into the fixture directory
111
+ :param relapath: relative path into the fixtures directory, or ''
112
+ to obtain the fixture directory itself"""
113
+ return os.path.join(os.path.dirname(__file__), 'fixtures', relapath)
114
+
115
+
116
+ def copy_files_globbed(source_glob, target_dir, hard_link_ok=False):
117
+ """Copy all files found according to the given source glob into the target directory
118
+ :param hard_link_ok: if True, hard links will be created if possible. Otherwise
119
+ the files will be copied"""
120
+ for src_file in glob.glob(source_glob):
121
+ if hard_link_ok and hasattr(os, 'link'):
122
+ target = os.path.join(target_dir, os.path.basename(src_file))
123
+ try:
124
+ os.link(src_file, target)
125
+ except OSError:
126
+ shutil.copy(src_file, target_dir)
127
+ # END handle cross device links ( and resulting failure )
128
+ else:
129
+ shutil.copy(src_file, target_dir)
130
+ # END try hard link
131
+ # END for each file to copy
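Called directly, the helper looks like this (the target directory must already exist; paths are hypothetical):

    copy_files_globbed(fixture_path('packs/*'), '/tmp/testpacks', hard_link_ok=True)

Hard links avoid duplicating pack data on the same filesystem; the OSError fallback above covers cross-device links, where hard linking is impossible.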
132
+
133
+
134
+ def make_bytes(size_in_bytes, randomize=False):
135
+ """:return: string with given size in bytes
136
+ :param randomize: try to produce a very random stream"""
137
+ actual_size = size_in_bytes // 4
138
+ producer = range(actual_size)
139
+ if randomize:
140
+ producer = list(producer)
141
+ random.shuffle(producer)
142
+ # END randomize
143
+ a = array('i', producer)
144
+ return a.tobytes()
145
+
146
+
147
+ def make_object(type, data):
148
+ """:return: bytes resembling an uncompressed object"""
149
+ odata = "blob %i\0" % len(data)
150
+ return odata + data
151
+
152
+
153
+ def make_memory_file(size_in_bytes, randomize=False):
154
+ """:return: tuple(size_of_stream, stream)
155
+ :param randomize: try to produce a very random stream"""
156
+ d = make_bytes(size_in_bytes, randomize)
157
+ return len(d), BytesIO(d)
158
+
159
+ #} END routines
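A quick check of the routines above (str_blob_type comes from gitdb.typ and is the bytes literal b'blob'):

    from gitdb.typ import str_blob_type

    size, stream = make_memory_file(1000, randomize=True)
    assert len(stream.read()) == size
    obj = make_object(str_blob_type, b"content")   # header b"blob 7\x00" plus payload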
160
+
161
+ #{ Stream Utilities
162
+
163
+
164
+ class DummyStream:
165
+
166
+ def __init__(self):
167
+ self.was_read = False
168
+ self.bytes = 0
169
+ self.closed = False
170
+
171
+ def read(self, size):
172
+ self.was_read = True
173
+ self.bytes = size
174
+
175
+ def close(self):
176
+ self.closed = True
177
+
178
+ def _assert(self):
179
+ assert self.was_read
180
+
181
+
182
+ class DeriveTest(OStream):
183
+
184
+ def __init__(self, sha, type, size, stream, *args, **kwargs):
185
+ self.myarg = kwargs.pop('myarg')
186
+ self.args = args
187
+
188
+ def _assert(self):
189
+ assert self.args
190
+ assert self.myarg
191
+
192
+ #} END stream utilities
valley/lib/python3.10/site-packages/gitdb/test/test_base.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Test for object db"""
6
+ from gitdb.test.lib import (
7
+ TestBase,
8
+ DummyStream,
9
+ DeriveTest,
10
+ )
11
+
12
+ from gitdb import (
13
+ OInfo,
14
+ OPackInfo,
15
+ ODeltaPackInfo,
16
+ OStream,
17
+ OPackStream,
18
+ ODeltaPackStream,
19
+ IStream
20
+ )
21
+ from gitdb.util import (
22
+ NULL_BIN_SHA
23
+ )
24
+
25
+ from gitdb.typ import (
26
+ str_blob_type
27
+ )
28
+
29
+
30
+ class TestBaseTypes(TestBase):
31
+
32
+ def test_streams(self):
33
+ # test info
34
+ sha = NULL_BIN_SHA
35
+ s = 20
36
+ blob_id = 3
37
+
38
+ info = OInfo(sha, str_blob_type, s)
39
+ assert info.binsha == sha
40
+ assert info.type == str_blob_type
41
+ assert info.type_id == blob_id
42
+ assert info.size == s
43
+
44
+ # test pack info
45
+ # provides type_id
46
+ pinfo = OPackInfo(0, blob_id, s)
47
+ assert pinfo.type == str_blob_type
48
+ assert pinfo.type_id == blob_id
49
+ assert pinfo.pack_offset == 0
50
+
51
+ dpinfo = ODeltaPackInfo(0, blob_id, s, sha)
52
+ assert dpinfo.type == str_blob_type
53
+ assert dpinfo.type_id == blob_id
54
+ assert dpinfo.delta_info == sha
55
+ assert dpinfo.pack_offset == 0
56
+
57
+ # test ostream
58
+ stream = DummyStream()
59
+ ostream = OStream(*(info + (stream, )))
60
+ assert ostream.stream is stream
61
+ ostream.read(15)
62
+ stream._assert()
63
+ assert stream.bytes == 15
64
+ ostream.read(20)
65
+ assert stream.bytes == 20
66
+
67
+ # test packstream
68
+ postream = OPackStream(*(pinfo + (stream, )))
69
+ assert postream.stream is stream
70
+ postream.read(10)
71
+ stream._assert()
72
+ assert stream.bytes == 10
73
+
74
+ # test deltapackstream
75
+ dpostream = ODeltaPackStream(*(dpinfo + (stream, )))
76
+ assert dpostream.stream is stream
77
+ dpostream.read(5)
78
+ stream._assert()
79
+ assert stream.bytes == 5
80
+
81
+ # derive with own args
82
+ DeriveTest(sha, str_blob_type, s, stream, 'mine', myarg=3)._assert()
83
+
84
+ # test istream
85
+ istream = IStream(str_blob_type, s, stream)
86
+ assert istream.binsha is None
87
+ istream.binsha = sha
88
+ assert istream.binsha == sha
89
+
90
+ assert len(istream.binsha) == 20
91
+ assert len(istream.hexsha) == 40
92
+
93
+ assert istream.size == s
94
+ istream.size = s * 2
95
+ assert istream.size == s * 2
96
+ assert istream.type == str_blob_type
97
+ istream.type = "something"
98
+ assert istream.type == "something"
99
+ assert istream.stream is stream
100
+ istream.stream = None
101
+ assert istream.stream is None
102
+
103
+ assert istream.error is None
104
+ istream.error = Exception()
105
+ assert isinstance(istream.error, Exception)
valley/lib/python3.10/site-packages/gitdb/test/test_example.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Module with examples from the tutorial section of the docs"""
6
+ import os
7
+ from gitdb.test.lib import TestBase
8
+ from gitdb import IStream
9
+ from gitdb.db import LooseObjectDB
10
+
11
+ from io import BytesIO
12
+
13
+
14
+ class TestExamples(TestBase):
15
+
16
+ def test_base(self):
17
+ ldb = LooseObjectDB(os.path.join(self.gitrepopath, 'objects'))
18
+
19
+ for sha1 in ldb.sha_iter():
20
+ oinfo = ldb.info(sha1)
21
+ ostream = ldb.stream(sha1)
22
+ assert oinfo[:3] == ostream[:3]
23
+
24
+ assert len(ostream.read()) == ostream.size
25
+ assert ldb.has_object(oinfo.binsha)
26
+ # END for each sha in database
27
+ # assure we close all files
28
+ try:
29
+ del(ostream)
30
+ del(oinfo)
31
+ except UnboundLocalError:
32
+ pass
33
+ # END ignore exception if there are no loose objects
34
+
35
+ data = b"my data"
36
+ istream = IStream("blob", len(data), BytesIO(data))
37
+
38
+ # the object does not yet have a sha
39
+ assert istream.binsha is None
40
+ ldb.store(istream)
41
+ # now the sha is set
42
+ assert len(istream.binsha) == 20
43
+ assert ldb.has_object(istream.binsha)
valley/lib/python3.10/site-packages/gitdb/test/test_pack.py ADDED
@@ -0,0 +1,249 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Test everything about packs reading and writing"""
6
+ from gitdb.test.lib import (
7
+ TestBase,
8
+ with_rw_directory,
9
+ fixture_path
10
+ )
11
+
12
+ from gitdb.stream import DeltaApplyReader
13
+
14
+ from gitdb.pack import (
15
+ PackEntity,
16
+ PackIndexFile,
17
+ PackFile
18
+ )
19
+
20
+ from gitdb.base import (
21
+ OInfo,
22
+ OStream,
23
+ )
24
+
25
+ from gitdb.fun import delta_types
26
+ from gitdb.exc import UnsupportedOperation
27
+ from gitdb.util import to_bin_sha
28
+
29
+ import pytest
30
+
31
+ import os
32
+ import tempfile
33
+
34
+
35
+ #{ Utilities
36
+ def bin_sha_from_filename(filename):
37
+ return to_bin_sha(os.path.splitext(os.path.basename(filename))[0][5:])
38
+ #} END utilities
39
+
40
+
41
+ class TestPack(TestBase):
42
+
43
+ packindexfile_v1 = (fixture_path('packs/pack-c0438c19fb16422b6bbcce24387b3264416d485b.idx'), 1, 67)
44
+ packindexfile_v2 = (fixture_path('packs/pack-11fdfa9e156ab73caae3b6da867192221f2089c2.idx'), 2, 30)
45
+ packindexfile_v2_3_ascii = (fixture_path('packs/pack-a2bf8e71d8c18879e499335762dd95119d93d9f1.idx'), 2, 42)
46
+ packfile_v2_1 = (fixture_path('packs/pack-c0438c19fb16422b6bbcce24387b3264416d485b.pack'), 2, packindexfile_v1[2])
47
+ packfile_v2_2 = (fixture_path('packs/pack-11fdfa9e156ab73caae3b6da867192221f2089c2.pack'), 2, packindexfile_v2[2])
48
+ packfile_v2_3_ascii = (
49
+ fixture_path('packs/pack-a2bf8e71d8c18879e499335762dd95119d93d9f1.pack'), 2, packindexfile_v2_3_ascii[2])
50
+
51
+ def _assert_index_file(self, index, version, size):
52
+ assert index.packfile_checksum() != index.indexfile_checksum()
53
+ assert len(index.packfile_checksum()) == 20
54
+ assert len(index.indexfile_checksum()) == 20
55
+ assert index.version() == version
56
+ assert index.size() == size
57
+ assert len(index.offsets()) == size
58
+
59
+ # get all data of all objects
60
+ for oidx in range(index.size()):
61
+ sha = index.sha(oidx)
62
+ assert oidx == index.sha_to_index(sha)
63
+
64
+ entry = index.entry(oidx)
65
+ assert len(entry) == 3
66
+
67
+ assert entry[0] == index.offset(oidx)
68
+ assert entry[1] == sha
69
+ assert entry[2] == index.crc(oidx)
70
+
71
+ # verify partial sha
72
+ for l in (4, 8, 11, 17, 20):
73
+ assert index.partial_sha_to_index(sha[:l], l * 2) == oidx
74
+
75
+ # END for each object index in indexfile
76
+ self.assertRaises(ValueError, index.partial_sha_to_index, "\0", 2)
77
+
78
+ def _assert_pack_file(self, pack, version, size):
79
+ assert pack.version() == version
80
+ assert pack.size() == size
81
+ assert len(pack.checksum()) == 20
82
+
83
+ num_obj = 0
84
+ for obj in pack.stream_iter():
85
+ num_obj += 1
86
+ info = pack.info(obj.pack_offset)
87
+ stream = pack.stream(obj.pack_offset)
88
+
89
+ assert info.pack_offset == stream.pack_offset
90
+ assert info.type_id == stream.type_id
91
+ assert hasattr(stream, 'read')
92
+
93
+ # it should be possible to read from both streams
94
+ assert obj.read() == stream.read()
95
+
96
+ streams = pack.collect_streams(obj.pack_offset)
97
+ assert streams
98
+
99
+ # read the stream
100
+ try:
101
+ dstream = DeltaApplyReader.new(streams)
102
+ except ValueError:
103
+ # ignore these, old git versions use only ref deltas,
104
+ # which we haven't resolved (as we are without an index).
105
+ # Also ignore non-delta streams
106
+ continue
107
+ # END get deltastream
108
+
109
+ # read all
110
+ data = dstream.read()
111
+ assert len(data) == dstream.size
112
+
113
+ # test seek
114
+ dstream.seek(0)
115
+ assert dstream.read() == data
116
+
117
+ # read chunks
118
+ # NOTE: the current implementation is safe, it basically transfers
119
+ # all calls to the underlying memory map
120
+
121
+ # END for each object
122
+ assert num_obj == size
123
+
124
+ def test_pack_index(self):
125
+ # check version 1 and 2
126
+ for indexfile, version, size in (self.packindexfile_v1, self.packindexfile_v2):
127
+ index = PackIndexFile(indexfile)
128
+ self._assert_index_file(index, version, size)
129
+ # END run tests
130
+
131
+ def test_pack(self):
132
+ # there is this special version 3, but apparently its like 2 ...
133
+ for packfile, version, size in (self.packfile_v2_3_ascii, self.packfile_v2_1, self.packfile_v2_2):
134
+ pack = PackFile(packfile)
135
+ self._assert_pack_file(pack, version, size)
136
+ # END for each pack to test
137
+
138
+ @with_rw_directory
139
+ def test_pack_entity(self, rw_dir):
140
+ pack_objs = list()
141
+ for packinfo, indexinfo in ((self.packfile_v2_1, self.packindexfile_v1),
142
+ (self.packfile_v2_2, self.packindexfile_v2),
143
+ (self.packfile_v2_3_ascii, self.packindexfile_v2_3_ascii)):
144
+ packfile, version, size = packinfo
145
+ indexfile, version, size = indexinfo
146
+ entity = PackEntity(packfile)
147
+ assert entity.pack().path() == packfile
148
+ assert entity.index().path() == indexfile
149
+ pack_objs.extend(entity.stream_iter())
150
+
151
+ count = 0
152
+ for info, stream in zip(entity.info_iter(), entity.stream_iter()):
153
+ count += 1
154
+ assert info.binsha == stream.binsha
155
+ assert len(info.binsha) == 20
156
+ assert info.type_id == stream.type_id
157
+ assert info.size == stream.size
158
+
159
+ # we return fully resolved items, which is implied by the sha-centric access
160
+ assert info.type_id not in delta_types
161
+
162
+ # try all calls
163
+ assert len(entity.collect_streams(info.binsha))
164
+ oinfo = entity.info(info.binsha)
165
+ assert isinstance(oinfo, OInfo)
166
+ assert oinfo.binsha is not None
167
+ ostream = entity.stream(info.binsha)
168
+ assert isinstance(ostream, OStream)
169
+ assert ostream.binsha is not None
170
+
171
+ # verify the stream
172
+ try:
173
+ assert entity.is_valid_stream(info.binsha, use_crc=True)
174
+ except UnsupportedOperation:
175
+ pass
176
+ # END ignore version issues
177
+ assert entity.is_valid_stream(info.binsha, use_crc=False)
178
+ # END for each info, stream tuple
179
+ assert count == size
180
+
181
+ # END for each entity
182
+
183
+ # pack writing - write all packs into one
184
+ # index path can be None
185
+ pack_path1 = tempfile.mktemp('', "pack1", rw_dir)
186
+ pack_path2 = tempfile.mktemp('', "pack2", rw_dir)
187
+ index_path = tempfile.mktemp('', 'index', rw_dir)
188
+ iteration = 0
189
+
190
+ def rewind_streams():
191
+ for obj in pack_objs:
192
+ obj.stream.seek(0)
193
+ # END utility
194
+ for ppath, ipath, num_obj in zip((pack_path1, pack_path2),
195
+ (index_path, None),
196
+ (len(pack_objs), None)):
197
+ iwrite = None
198
+ if ipath:
199
+ ifile = open(ipath, 'wb')
200
+ iwrite = ifile.write
201
+ # END handle ip
202
+
203
+ # make sure we rewind the streams ... we work on the same objects over and over again
204
+ if iteration > 0:
205
+ rewind_streams()
206
+ # END rewind streams
207
+ iteration += 1
208
+
209
+ with open(ppath, 'wb') as pfile:
210
+ pack_sha, index_sha = PackEntity.write_pack(pack_objs, pfile.write, iwrite, object_count=num_obj)
211
+ assert os.path.getsize(ppath) > 100
212
+
213
+ # verify pack
214
+ pf = PackFile(ppath)
215
+ assert pf.size() == len(pack_objs)
216
+ assert pf.version() == PackFile.pack_version_default
217
+ assert pf.checksum() == pack_sha
218
+ pf.close()
219
+
220
+ # verify index
221
+ if ipath is not None:
222
+ ifile.close()
223
+ assert os.path.getsize(ipath) > 100
224
+ idx = PackIndexFile(ipath)
225
+ assert idx.version() == PackIndexFile.index_version_default
226
+ assert idx.packfile_checksum() == pack_sha
227
+ assert idx.indexfile_checksum() == index_sha
228
+ assert idx.size() == len(pack_objs)
229
+ idx.close()
230
+ # END verify files exist
231
+ # END for each packpath, indexpath pair
232
+
233
+ # verify the packs thoroughly
234
+ rewind_streams()
235
+ entity = PackEntity.create(pack_objs, rw_dir)
236
+ count = 0
237
+ for info in entity.info_iter():
238
+ count += 1
239
+ for use_crc in range(2):
240
+ assert entity.is_valid_stream(info.binsha, use_crc)
241
+ # END for each crc mode
242
+ # END for each info
243
+ assert count == len(pack_objs)
244
+ entity.close()
245
+
246
+ def test_pack_64(self):
247
+ # TODO: hex-edit a pack helping us to verify that we can handle 64 byte offsets
248
+ # of course without really needing such a huge pack
249
+ pytest.skip('not implemented')
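Outside the test harness, writing a pack plus index from a set of object streams follows the same calls the test exercises; a hedged sketch that re-packs an existing pack (the source pack path is hypothetical):

    from gitdb.pack import PackEntity

    entity = PackEntity("/path/to/pack-abc123.pack")   # hypothetical existing pack
    streams = list(entity.stream_iter())
    with open("/tmp/out.pack", "wb") as pfile, open("/tmp/out.idx", "wb") as ifile:
        pack_sha, index_sha = PackEntity.write_pack(
            streams, pfile.write, ifile.write, object_count=len(streams))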
valley/lib/python3.10/site-packages/gitdb/test/test_stream.py ADDED
@@ -0,0 +1,164 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Test for object db"""
6
+
7
+ from gitdb.test.lib import (
8
+ TestBase,
9
+ DummyStream,
10
+ make_bytes,
11
+ make_object,
12
+ fixture_path
13
+ )
14
+
15
+ from gitdb import (
16
+ DecompressMemMapReader,
17
+ FDCompressedSha1Writer,
18
+ LooseObjectDB,
19
+ Sha1Writer,
20
+ MemoryDB,
21
+ IStream,
22
+ )
23
+ from gitdb.util import hex_to_bin
24
+
25
+ import zlib
26
+ from gitdb.typ import (
27
+ str_blob_type
28
+ )
29
+
30
+ import tempfile
31
+ import os
32
+ from io import BytesIO
33
+
34
+
35
+ class TestStream(TestBase):
36
+
37
+ """Test stream classes"""
38
+
39
+ data_sizes = (15, 10000, 1000 * 1024 + 512)
40
+
41
+ def _assert_stream_reader(self, stream, cdata, rewind_stream=lambda s: None):
42
+ """Make stream tests - the orig_stream is seekable, allowing it to be
43
+ rewound and reused
44
+ :param cdata: the data we expect to read from stream, the contents
45
+ :param rewind_stream: function called to rewind the stream to make it ready
46
+ for reuse"""
47
+ ns = 10
48
+ assert len(cdata) > ns - 1, "Data must be larger than %i, was %i" % (ns, len(cdata))
49
+
50
+ # read in small steps
51
+ ss = len(cdata) // ns
52
+ for i in range(ns):
53
+ data = stream.read(ss)
54
+ chunk = cdata[i * ss:(i + 1) * ss]
55
+ assert data == chunk
56
+ # END for each step
57
+ rest = stream.read()
58
+ if rest:
59
+ assert rest == cdata[-len(rest):]
60
+ # END handle rest
61
+
62
+ if isinstance(stream, DecompressMemMapReader):
63
+ assert len(stream.data()) == stream.compressed_bytes_read()
64
+ # END handle special type
65
+
66
+ rewind_stream(stream)
67
+
68
+ # read everything
69
+ rdata = stream.read()
70
+ assert rdata == cdata
71
+
72
+ if isinstance(stream, DecompressMemMapReader):
73
+ assert len(stream.data()) == stream.compressed_bytes_read()
74
+ # END handle special type
75
+
76
+ def test_decompress_reader(self):
77
+ for close_on_deletion in range(2):
78
+ for with_size in range(2):
79
+ for ds in self.data_sizes:
80
+ cdata = make_bytes(ds, randomize=False)
81
+
82
+ # zdata = zipped actual data
83
+ # cdata = original content data
84
+
85
+ # create reader
86
+ if with_size:
87
+ # need object data
88
+ zdata = zlib.compress(make_object(str_blob_type, cdata))
89
+ typ, size, reader = DecompressMemMapReader.new(zdata, close_on_deletion)
90
+ assert size == len(cdata)
91
+ assert typ == str_blob_type
92
+
93
+ # even if we don't set the size, it will be set automatically on first read
94
+ test_reader = DecompressMemMapReader(zdata, close_on_deletion=False)
95
+ assert test_reader._s == len(cdata)
96
+ else:
97
+ # here we need content data
98
+ zdata = zlib.compress(cdata)
99
+ reader = DecompressMemMapReader(zdata, close_on_deletion, len(cdata))
100
+ assert reader._s == len(cdata)
101
+ # END get reader
102
+
103
+ self._assert_stream_reader(reader, cdata, lambda r: r.seek(0))
104
+
105
+ # put in a dummy stream for closing
106
+ dummy = DummyStream()
107
+ reader._m = dummy
108
+
109
+ assert not dummy.closed
110
+ del(reader)
111
+ assert dummy.closed == close_on_deletion
112
+ # END for each datasize
113
+ # END whether size should be used
114
+ # END whether stream should be closed when deleted
115
+
116
+ def test_sha_writer(self):
117
+ writer = Sha1Writer()
118
+ assert 2 == writer.write(b"hi")
119
+ assert len(writer.sha(as_hex=1)) == 40
120
+ assert len(writer.sha(as_hex=0)) == 20
121
+
122
+ # make sure it does something ;)
123
+ prev_sha = writer.sha()
124
+ writer.write(b"hi again")
125
+ assert writer.sha() != prev_sha
126
+
127
+ def test_compressed_writer(self):
128
+ for ds in self.data_sizes:
129
+ fd, path = tempfile.mkstemp()
130
+ ostream = FDCompressedSha1Writer(fd)
131
+ data = make_bytes(ds, randomize=False)
132
+
133
+ # for now, just a single write, code doesn't care about chunking
134
+ assert len(data) == ostream.write(data)
135
+ ostream.close()
136
+
137
+ # its closed already
138
+ self.assertRaises(OSError, os.close, fd)
139
+
140
+ # read everything back, compare to data we zip
141
+ fd = os.open(path, os.O_RDONLY | getattr(os, 'O_BINARY', 0))
142
+ written_data = os.read(fd, os.path.getsize(path))
143
+ assert len(written_data) == os.path.getsize(path)
144
+ os.close(fd)
145
+ assert written_data == zlib.compress(data, 1) # best speed
146
+
147
+ os.remove(path)
148
+ # END for each os
149
+
150
+ def test_decompress_reader_special_case(self):
151
+ odb = LooseObjectDB(fixture_path('objects'))
152
+ mdb = MemoryDB()
153
+ for sha in (b'888401851f15db0eed60eb1bc29dec5ddcace911',
154
+ b'7bb839852ed5e3a069966281bb08d50012fb309b',):
155
+ ostream = odb.stream(hex_to_bin(sha))
156
+
157
+ # if there is a bug, we will be missing exactly one byte!
158
+ data = ostream.read()
159
+ assert len(data) == ostream.size
160
+
161
+ # Putting it back in should yield nothing new - after all, we store the very same data
162
+ dump = mdb.store(IStream(ostream.type, ostream.size, BytesIO(data)))
163
+ assert dump.hexsha == sha
164
+ # end for each loose object sha to test
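Using only names this test module already imports, the compress/decompress round trip the tests above rely on can be summarized as a short sketch:

    payload = make_object(str_blob_type, b"hello")   # b"blob 5\x00hello"
    typ, size, reader = DecompressMemMapReader.new(zlib.compress(payload), False)
    assert (typ, size) == (str_blob_type, 5)
    assert reader.read() == b"hello"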
valley/lib/python3.10/site-packages/gitdb/test/test_util.py ADDED
@@ -0,0 +1,100 @@
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Test for object db"""
6
+ import tempfile
7
+ import os
8
+
9
+ from gitdb.test.lib import TestBase
10
+ from gitdb.util import (
11
+ to_hex_sha,
12
+ to_bin_sha,
13
+ NULL_HEX_SHA,
14
+ LockedFD
15
+ )
16
+
17
+
18
+ class TestUtils(TestBase):
19
+
20
+ def test_basics(self):
21
+ assert to_hex_sha(NULL_HEX_SHA) == NULL_HEX_SHA
22
+ assert len(to_bin_sha(NULL_HEX_SHA)) == 20
23
+ assert to_hex_sha(to_bin_sha(NULL_HEX_SHA)) == NULL_HEX_SHA.encode("ascii")
24
+
25
+ def _cmp_contents(self, file_path, data):
26
+ # raise if data from file at file_path
27
+ # does not match data string
28
+ with open(file_path, "rb") as fp:
29
+ assert fp.read() == data.encode("ascii")
30
+
31
+ def test_lockedfd(self):
32
+ my_file = tempfile.mktemp()
33
+ orig_data = "hello"
34
+ new_data = "world"
35
+ with open(my_file, "wb") as my_file_fp:
36
+ my_file_fp.write(orig_data.encode("ascii"))
37
+
38
+ try:
39
+ lfd = LockedFD(my_file)
40
+ lockfilepath = lfd._lockfilepath()
41
+
42
+ # cannot end before it was started
43
+ self.assertRaises(AssertionError, lfd.rollback)
44
+ self.assertRaises(AssertionError, lfd.commit)
45
+
46
+ # open for writing
47
+ assert not os.path.isfile(lockfilepath)
48
+ wfd = lfd.open(write=True)
49
+ assert lfd._fd is wfd
50
+ assert os.path.isfile(lockfilepath)
51
+
52
+ # write data and fail
53
+ os.write(wfd, new_data.encode("ascii"))
54
+ lfd.rollback()
55
+ assert lfd._fd is None
56
+ self._cmp_contents(my_file, orig_data)
57
+ assert not os.path.isfile(lockfilepath)
58
+
59
+ # additional call doesn't fail
60
+ lfd.commit()
61
+ lfd.rollback()
62
+
63
+ # test reading
64
+ lfd = LockedFD(my_file)
65
+ rfd = lfd.open(write=False)
66
+ assert os.read(rfd, len(orig_data)) == orig_data.encode("ascii")
67
+
68
+ assert os.path.isfile(lockfilepath)
69
+ # deletion rolls back
70
+ del(lfd)
71
+ assert not os.path.isfile(lockfilepath)
72
+
73
+ # write data - concurrently
74
+ lfd = LockedFD(my_file)
75
+ olfd = LockedFD(my_file)
76
+ assert not os.path.isfile(lockfilepath)
77
+ wfdstream = lfd.open(write=True, stream=True) # this time as stream
78
+ assert os.path.isfile(lockfilepath)
79
+ # another one fails
80
+ self.assertRaises(IOError, olfd.open)
81
+
82
+ wfdstream.write(new_data.encode("ascii"))
83
+ lfd.commit()
84
+ assert not os.path.isfile(lockfilepath)
85
+ self._cmp_contents(my_file, new_data)
86
+
87
+ # could test automatic _end_writing on destruction
88
+ finally:
89
+ os.remove(my_file)
90
+ # END final cleanup
91
+
92
+ # try non-existing file for reading
93
+ lfd = LockedFD(tempfile.mktemp())
94
+ try:
95
+ lfd.open(write=False)
96
+ except OSError:
97
+ assert not os.path.exists(lfd._lockfilepath())
98
+ else:
99
+ self.fail("expected OSError")
100
+ # END handle exceptions
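The protocol exercised above boils down to: open() creates a sibling lock file and hands out its descriptor (failing if the lock is already held), commit() moves the written lock file over the target, and rollback() or destruction discards it. A minimal write sketch (the target path is hypothetical):

    import os
    from gitdb.util import LockedFD

    lfd = LockedFD("/tmp/some-config")     # hypothetical target path
    fd = lfd.open(write=True)              # creates the lock file, raises if locked
    os.write(fd, b"new contents")
    lfd.commit()                           # replaces the target with the lock file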
valley/lib/python3.10/site-packages/gitdb/utils/__init__.py ADDED
File without changes
valley/lib/python3.10/site-packages/gitdb/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (163 Bytes).
 
valley/lib/python3.10/site-packages/gitdb/utils/__pycache__/encoding.cpython-310.pyc ADDED
Binary file (541 Bytes).
 
valley/lib/python3.10/site-packages/gitdb/utils/encoding.py ADDED
@@ -0,0 +1,18 @@
1
+ def force_bytes(data, encoding="utf-8"):
2
+ if isinstance(data, bytes):
3
+ return data
4
+
5
+ if isinstance(data, str):
6
+ return data.encode(encoding)
7
+
8
+ return data
9
+
10
+
11
+ def force_text(data, encoding="utf-8"):
12
+ if isinstance(data, str):
13
+ return data
14
+
15
+ if isinstance(data, bytes):
16
+ return data.decode(encoding)
17
+
18
+ return str(data, encoding)
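Both helpers are no-ops when the input already has the requested type, which makes them safe to call on mixed str/bytes data:

    assert force_bytes("héllo") == b"h\xc3\xa9llo"
    assert force_bytes(b"raw") == b"raw"
    assert force_text(b"h\xc3\xa9llo") == "héllo"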
valley/lib/python3.10/site-packages/huggingface_hub/__init__.py ADDED
@@ -0,0 +1,968 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ***********
16
+ # `huggingface_hub` init has 2 modes:
17
+ # - Normal usage:
18
+ # If imported to use it, all modules and functions are lazy-loaded. This means
19
+ # they exist at the top level of the module but are imported only the first time they are
20
+ # used. This way, `from huggingface_hub import something` will import `something`
21
+ # quickly without the hassle of importing all the features from `huggingface_hub`.
22
+ # - Static check:
23
+ # If statically analyzed, all modules and functions are loaded normally. This way
24
+ # static typing check works properly as well as autocomplete in text editors and
25
+ # IDEs.
26
+ #
27
+ # The static model imports are done inside the `if TYPE_CHECKING:` statement at
28
+ # the bottom of this file. Since module/functions imports are duplicated, it is
29
+ # mandatory to make sure to add them twice when adding one. This is checked in the
30
+ # `make quality` command.
31
+ #
32
+ # To update the static imports, please run the following command and commit the changes.
33
+ # ```
34
+ # # Use script
35
+ # python utils/check_static_imports.py --update-file
36
+ #
37
+ # # Or run style on codebase
38
+ # make style
39
+ # ```
40
+ #
41
+ # ***********
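In practice the lazy mode means that importing the package is cheap and each submodule loads on first attribute access; a small demonstration, relying on the hf_api mapping defined in _SUBMOD_ATTRS below:

    import sys
    import huggingface_hub

    assert "huggingface_hub.hf_api" not in sys.modules   # typically not loaded yet
    _ = huggingface_hub.HfApi                            # first access triggers the import
    assert "huggingface_hub.hf_api" in sys.modules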
42
+ # Lazy loader vendored from https://github.com/scientific-python/lazy_loader
43
+ import importlib
44
+ import os
45
+ import sys
46
+ from typing import TYPE_CHECKING
47
+
48
+
49
+ __version__ = "0.25.1"
50
+
51
+ # Alphabetical order of definitions is ensured in tests
52
+ # WARNING: any comment added in this dictionary definition will be lost when
53
+ # re-generating the file !
54
+ _SUBMOD_ATTRS = {
55
+ "_commit_scheduler": [
56
+ "CommitScheduler",
57
+ ],
58
+ "_inference_endpoints": [
59
+ "InferenceEndpoint",
60
+ "InferenceEndpointError",
61
+ "InferenceEndpointStatus",
62
+ "InferenceEndpointTimeoutError",
63
+ "InferenceEndpointType",
64
+ ],
65
+ "_login": [
66
+ "interpreter_login",
67
+ "login",
68
+ "logout",
69
+ "notebook_login",
70
+ ],
71
+ "_multi_commits": [
72
+ "MultiCommitException",
73
+ "plan_multi_commits",
74
+ ],
75
+ "_snapshot_download": [
76
+ "snapshot_download",
77
+ ],
78
+ "_space_api": [
79
+ "SpaceHardware",
80
+ "SpaceRuntime",
81
+ "SpaceStage",
82
+ "SpaceStorage",
83
+ "SpaceVariable",
84
+ ],
85
+ "_tensorboard_logger": [
86
+ "HFSummaryWriter",
87
+ ],
88
+ "_webhooks_payload": [
89
+ "WebhookPayload",
90
+ "WebhookPayloadComment",
91
+ "WebhookPayloadDiscussion",
92
+ "WebhookPayloadDiscussionChanges",
93
+ "WebhookPayloadEvent",
94
+ "WebhookPayloadMovedTo",
95
+ "WebhookPayloadRepo",
96
+ "WebhookPayloadUrl",
97
+ "WebhookPayloadWebhook",
98
+ ],
99
+ "_webhooks_server": [
100
+ "WebhooksServer",
101
+ "webhook_endpoint",
102
+ ],
103
+ "community": [
104
+ "Discussion",
105
+ "DiscussionComment",
106
+ "DiscussionCommit",
107
+ "DiscussionEvent",
108
+ "DiscussionStatusChange",
109
+ "DiscussionTitleChange",
110
+ "DiscussionWithDetails",
111
+ ],
112
+ "constants": [
113
+ "CONFIG_NAME",
114
+ "FLAX_WEIGHTS_NAME",
115
+ "HUGGINGFACE_CO_URL_HOME",
116
+ "HUGGINGFACE_CO_URL_TEMPLATE",
117
+ "PYTORCH_WEIGHTS_NAME",
118
+ "REPO_TYPE_DATASET",
119
+ "REPO_TYPE_MODEL",
120
+ "REPO_TYPE_SPACE",
121
+ "TF2_WEIGHTS_NAME",
122
+ "TF_WEIGHTS_NAME",
123
+ ],
124
+ "fastai_utils": [
125
+ "_save_pretrained_fastai",
126
+ "from_pretrained_fastai",
127
+ "push_to_hub_fastai",
128
+ ],
129
+ "file_download": [
130
+ "HfFileMetadata",
131
+ "_CACHED_NO_EXIST",
132
+ "cached_download",
133
+ "get_hf_file_metadata",
134
+ "hf_hub_download",
135
+ "hf_hub_url",
136
+ "try_to_load_from_cache",
137
+ ],
138
+ "hf_api": [
139
+ "Collection",
140
+ "CollectionItem",
141
+ "CommitInfo",
142
+ "CommitOperation",
143
+ "CommitOperationAdd",
144
+ "CommitOperationCopy",
145
+ "CommitOperationDelete",
146
+ "DatasetInfo",
147
+ "GitCommitInfo",
148
+ "GitRefInfo",
149
+ "GitRefs",
150
+ "HfApi",
151
+ "ModelInfo",
152
+ "RepoUrl",
153
+ "SpaceInfo",
154
+ "User",
155
+ "UserLikes",
156
+ "WebhookInfo",
157
+ "WebhookWatchedItem",
158
+ "accept_access_request",
159
+ "add_collection_item",
160
+ "add_space_secret",
161
+ "add_space_variable",
162
+ "auth_check",
163
+ "cancel_access_request",
164
+ "change_discussion_status",
165
+ "comment_discussion",
166
+ "create_branch",
167
+ "create_collection",
168
+ "create_commit",
169
+ "create_commits_on_pr",
170
+ "create_discussion",
171
+ "create_inference_endpoint",
172
+ "create_pull_request",
173
+ "create_repo",
174
+ "create_tag",
175
+ "create_webhook",
176
+ "dataset_info",
177
+ "delete_branch",
178
+ "delete_collection",
179
+ "delete_collection_item",
180
+ "delete_file",
181
+ "delete_folder",
182
+ "delete_inference_endpoint",
183
+ "delete_repo",
184
+ "delete_space_secret",
185
+ "delete_space_storage",
186
+ "delete_space_variable",
187
+ "delete_tag",
188
+ "delete_webhook",
189
+ "disable_webhook",
190
+ "duplicate_space",
191
+ "edit_discussion_comment",
192
+ "enable_webhook",
193
+ "file_exists",
194
+ "get_collection",
195
+ "get_dataset_tags",
196
+ "get_discussion_details",
197
+ "get_full_repo_name",
198
+ "get_inference_endpoint",
199
+ "get_model_tags",
200
+ "get_paths_info",
201
+ "get_repo_discussions",
202
+ "get_safetensors_metadata",
203
+ "get_space_runtime",
204
+ "get_space_variables",
205
+ "get_token_permission",
206
+ "get_user_overview",
207
+ "get_webhook",
208
+ "grant_access",
209
+ "like",
210
+ "list_accepted_access_requests",
211
+ "list_collections",
212
+ "list_datasets",
213
+ "list_inference_endpoints",
214
+ "list_liked_repos",
215
+ "list_metrics",
216
+ "list_models",
217
+ "list_organization_members",
218
+ "list_pending_access_requests",
219
+ "list_rejected_access_requests",
220
+ "list_repo_commits",
221
+ "list_repo_files",
222
+ "list_repo_likers",
223
+ "list_repo_refs",
224
+ "list_repo_tree",
225
+ "list_spaces",
226
+ "list_user_followers",
227
+ "list_user_following",
228
+ "list_webhooks",
229
+ "merge_pull_request",
230
+ "model_info",
231
+ "move_repo",
232
+ "parse_safetensors_file_metadata",
233
+ "pause_inference_endpoint",
234
+ "pause_space",
235
+ "preupload_lfs_files",
236
+ "reject_access_request",
237
+ "rename_discussion",
238
+ "repo_exists",
239
+ "repo_info",
240
+ "repo_type_and_id_from_hf_id",
241
+ "request_space_hardware",
242
+ "request_space_storage",
243
+ "restart_space",
244
+ "resume_inference_endpoint",
245
+ "revision_exists",
246
+ "run_as_future",
247
+ "scale_to_zero_inference_endpoint",
248
+ "set_space_sleep_time",
249
+ "space_info",
250
+ "super_squash_history",
251
+ "unlike",
252
+ "update_collection_item",
253
+ "update_collection_metadata",
254
+ "update_inference_endpoint",
255
+ "update_repo_settings",
256
+ "update_repo_visibility",
257
+ "update_webhook",
258
+ "upload_file",
259
+ "upload_folder",
260
+ "upload_large_folder",
261
+ "whoami",
262
+ ],
263
+ "hf_file_system": [
264
+ "HfFileSystem",
265
+ "HfFileSystemFile",
266
+ "HfFileSystemResolvedPath",
267
+ "HfFileSystemStreamFile",
268
+ ],
269
+ "hub_mixin": [
270
+ "ModelHubMixin",
271
+ "PyTorchModelHubMixin",
272
+ ],
273
+ "inference._client": [
274
+ "InferenceClient",
275
+ "InferenceTimeoutError",
276
+ ],
277
+ "inference._generated._async_client": [
278
+ "AsyncInferenceClient",
279
+ ],
280
+ "inference._generated.types": [
281
+ "AudioClassificationInput",
282
+ "AudioClassificationOutputElement",
283
+ "AudioClassificationParameters",
284
+ "AudioToAudioInput",
285
+ "AudioToAudioOutputElement",
286
+ "AutomaticSpeechRecognitionGenerationParameters",
287
+ "AutomaticSpeechRecognitionInput",
288
+ "AutomaticSpeechRecognitionOutput",
289
+ "AutomaticSpeechRecognitionOutputChunk",
290
+ "AutomaticSpeechRecognitionParameters",
291
+ "ChatCompletionInput",
292
+ "ChatCompletionInputFunctionDefinition",
293
+ "ChatCompletionInputFunctionName",
294
+ "ChatCompletionInputGrammarType",
295
+ "ChatCompletionInputMessage",
296
+ "ChatCompletionInputMessageChunk",
297
+ "ChatCompletionInputTool",
298
+ "ChatCompletionInputToolTypeClass",
299
+ "ChatCompletionInputURL",
300
+ "ChatCompletionOutput",
301
+ "ChatCompletionOutputComplete",
302
+ "ChatCompletionOutputFunctionDefinition",
303
+ "ChatCompletionOutputLogprob",
304
+ "ChatCompletionOutputLogprobs",
305
+ "ChatCompletionOutputMessage",
306
+ "ChatCompletionOutputToolCall",
307
+ "ChatCompletionOutputTopLogprob",
308
+ "ChatCompletionOutputUsage",
309
+ "ChatCompletionStreamOutput",
310
+ "ChatCompletionStreamOutputChoice",
311
+ "ChatCompletionStreamOutputDelta",
312
+ "ChatCompletionStreamOutputDeltaToolCall",
313
+ "ChatCompletionStreamOutputFunction",
314
+ "ChatCompletionStreamOutputLogprob",
315
+ "ChatCompletionStreamOutputLogprobs",
316
+ "ChatCompletionStreamOutputTopLogprob",
317
+ "DepthEstimationInput",
318
+ "DepthEstimationOutput",
319
+ "DocumentQuestionAnsweringInput",
320
+ "DocumentQuestionAnsweringInputData",
321
+ "DocumentQuestionAnsweringOutputElement",
322
+ "DocumentQuestionAnsweringParameters",
323
+ "FeatureExtractionInput",
324
+ "FillMaskInput",
325
+ "FillMaskOutputElement",
326
+ "FillMaskParameters",
327
+ "ImageClassificationInput",
328
+ "ImageClassificationOutputElement",
329
+ "ImageClassificationParameters",
330
+ "ImageSegmentationInput",
331
+ "ImageSegmentationOutputElement",
332
+ "ImageSegmentationParameters",
333
+ "ImageToImageInput",
334
+ "ImageToImageOutput",
335
+ "ImageToImageParameters",
336
+ "ImageToImageTargetSize",
337
+ "ImageToTextGenerationParameters",
338
+ "ImageToTextInput",
339
+ "ImageToTextOutput",
340
+ "ImageToTextParameters",
341
+ "ObjectDetectionBoundingBox",
342
+ "ObjectDetectionInput",
343
+ "ObjectDetectionOutputElement",
344
+ "ObjectDetectionParameters",
345
+ "QuestionAnsweringInput",
346
+ "QuestionAnsweringInputData",
347
+ "QuestionAnsweringOutputElement",
348
+ "QuestionAnsweringParameters",
349
+ "SentenceSimilarityInput",
350
+ "SentenceSimilarityInputData",
351
+ "SummarizationGenerationParameters",
352
+ "SummarizationInput",
353
+ "SummarizationOutput",
354
+ "TableQuestionAnsweringInput",
355
+ "TableQuestionAnsweringInputData",
356
+ "TableQuestionAnsweringOutputElement",
357
+ "Text2TextGenerationInput",
358
+ "Text2TextGenerationOutput",
359
+ "Text2TextGenerationParameters",
360
+ "TextClassificationInput",
361
+ "TextClassificationOutputElement",
362
+ "TextClassificationParameters",
363
+ "TextGenerationInput",
364
+ "TextGenerationInputGenerateParameters",
365
+ "TextGenerationInputGrammarType",
366
+ "TextGenerationOutput",
367
+ "TextGenerationOutputBestOfSequence",
368
+ "TextGenerationOutputDetails",
369
+ "TextGenerationOutputPrefillToken",
370
+ "TextGenerationOutputToken",
371
+ "TextGenerationStreamOutput",
372
+ "TextGenerationStreamOutputStreamDetails",
373
+ "TextGenerationStreamOutputToken",
374
+ "TextToAudioGenerationParameters",
375
+ "TextToAudioInput",
376
+ "TextToAudioOutput",
377
+ "TextToAudioParameters",
378
+ "TextToImageInput",
379
+ "TextToImageOutput",
380
+ "TextToImageParameters",
381
+ "TextToImageTargetSize",
382
+ "TokenClassificationInput",
383
+ "TokenClassificationOutputElement",
384
+ "TokenClassificationParameters",
385
+ "TranslationGenerationParameters",
386
+ "TranslationInput",
387
+ "TranslationOutput",
388
+ "VideoClassificationInput",
389
+ "VideoClassificationOutputElement",
390
+ "VideoClassificationParameters",
391
+ "VisualQuestionAnsweringInput",
392
+ "VisualQuestionAnsweringInputData",
393
+ "VisualQuestionAnsweringOutputElement",
394
+ "VisualQuestionAnsweringParameters",
395
+ "ZeroShotClassificationInput",
396
+ "ZeroShotClassificationInputData",
397
+ "ZeroShotClassificationOutputElement",
398
+ "ZeroShotClassificationParameters",
399
+ "ZeroShotImageClassificationInput",
400
+ "ZeroShotImageClassificationInputData",
401
+ "ZeroShotImageClassificationOutputElement",
402
+ "ZeroShotImageClassificationParameters",
403
+ "ZeroShotObjectDetectionBoundingBox",
404
+ "ZeroShotObjectDetectionInput",
405
+ "ZeroShotObjectDetectionInputData",
406
+ "ZeroShotObjectDetectionOutputElement",
407
+ ],
408
+ "inference_api": [
409
+ "InferenceApi",
410
+ ],
411
+ "keras_mixin": [
412
+ "KerasModelHubMixin",
413
+ "from_pretrained_keras",
414
+ "push_to_hub_keras",
415
+ "save_pretrained_keras",
416
+ ],
417
+ "repocard": [
418
+ "DatasetCard",
419
+ "ModelCard",
420
+ "RepoCard",
421
+ "SpaceCard",
422
+ "metadata_eval_result",
423
+ "metadata_load",
424
+ "metadata_save",
425
+ "metadata_update",
426
+ ],
427
+ "repocard_data": [
428
+ "CardData",
429
+ "DatasetCardData",
430
+ "EvalResult",
431
+ "ModelCardData",
432
+ "SpaceCardData",
433
+ ],
434
+ "repository": [
435
+ "Repository",
436
+ ],
437
+ "serialization": [
438
+ "StateDictSplit",
439
+ "get_tf_storage_size",
440
+ "get_torch_storage_id",
441
+ "get_torch_storage_size",
442
+ "save_torch_model",
443
+ "save_torch_state_dict",
444
+ "split_state_dict_into_shards_factory",
445
+ "split_tf_state_dict_into_shards",
446
+ "split_torch_state_dict_into_shards",
447
+ ],
448
+ "utils": [
449
+ "CacheNotFound",
450
+ "CachedFileInfo",
451
+ "CachedRepoInfo",
452
+ "CachedRevisionInfo",
453
+ "CorruptedCacheException",
454
+ "DeleteCacheStrategy",
455
+ "HFCacheInfo",
456
+ "HfFolder",
457
+ "cached_assets_path",
458
+ "configure_http_backend",
459
+ "dump_environment_info",
460
+ "get_session",
461
+ "get_token",
462
+ "logging",
463
+ "scan_cache_dir",
464
+ ],
465
+ }
466
+
467
+
468
+ def _attach(package_name, submodules=None, submod_attrs=None):
469
+ """Attach lazily loaded submodules, functions, or other attributes.
470
+
471
+ Typically, modules import submodules and attributes as follows:
472
+
473
+ ```py
474
+ import mysubmodule
475
+ import anothersubmodule
476
+
477
+ from .foo import someattr
478
+ ```
479
+
480
+ The idea is to replace a package's `__getattr__`, `__dir__`, and
481
+ `__all__`, such that all imports work exactly the way they would
482
+ with normal imports, except that the import occurs upon first use.
483
+
484
+ The typical way to call this function, replacing the above imports, is:
485
+
486
+ ```python
487
+ __getattr__, __dir__, __all__ = lazy.attach(
488
+ __name__,
489
+ ['mysubmodule', 'anothersubmodule'],
490
+ {'foo': ['someattr']}
491
+ )
492
+ ```
493
+ This functionality requires Python 3.7 or higher.
494
+
495
+ Args:
496
+ package_name (`str`):
497
+ Typically use `__name__`.
498
+ submodules (`set`):
499
+ List of submodules to attach.
500
+ submod_attrs (`dict`):
501
+ Dictionary of submodule -> list of attributes / functions.
502
+ These attributes are imported as they are used.
503
+
504
+ Returns:
505
+ __getattr__, __dir__, __all__
506
+
507
+ """
508
+ if submod_attrs is None:
509
+ submod_attrs = {}
510
+
511
+ if submodules is None:
512
+ submodules = set()
513
+ else:
514
+ submodules = set(submodules)
515
+
516
+ attr_to_modules = {attr: mod for mod, attrs in submod_attrs.items() for attr in attrs}
517
+
518
+ __all__ = list(submodules | attr_to_modules.keys())
519
+
520
+ def __getattr__(name):
521
+ if name in submodules:
522
+ try:
523
+ return importlib.import_module(f"{package_name}.{name}")
524
+ except Exception as e:
525
+ print(f"Error importing {package_name}.{name}: {e}")
526
+ raise
527
+ elif name in attr_to_modules:
528
+ submod_path = f"{package_name}.{attr_to_modules[name]}"
529
+ try:
530
+ submod = importlib.import_module(submod_path)
531
+ except Exception as e:
532
+ print(f"Error importing {submod_path}: {e}")
533
+ raise
534
+ attr = getattr(submod, name)
535
+
536
+ # If the attribute lives in a file (module) with the same
537
+ # name as the attribute, ensure that the attribute and *not*
538
+ # the module is accessible on the package.
539
+ if name == attr_to_modules[name]:
540
+ pkg = sys.modules[package_name]
541
+ pkg.__dict__[name] = attr
542
+
543
+ return attr
544
+ else:
545
+ raise AttributeError(f"No {package_name} attribute {name}")
546
+
547
+ def __dir__():
548
+ return __all__
549
+
550
+ return __getattr__, __dir__, list(__all__)
551
+
552
+
553
+ __getattr__, __dir__, __all__ = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS)
554
+
555
+ if os.environ.get("EAGER_IMPORT", ""):
556
+ for attr in __all__:
557
+ __getattr__(attr)
558
+
559
+ # WARNING: any content below this statement is generated automatically. Any manual edit
560
+ # will be lost when re-generating this file !
561
+ #
562
+ # To update the static imports, please run the following command and commit the changes.
563
+ # ```
564
+ # # Use script
565
+ # python utils/check_static_imports.py --update-file
566
+ #
567
+ # # Or run style on codebase
568
+ # make style
569
+ # ```
570
+ if TYPE_CHECKING: # pragma: no cover
571
+ from ._commit_scheduler import CommitScheduler # noqa: F401
572
+ from ._inference_endpoints import (
573
+ InferenceEndpoint, # noqa: F401
574
+ InferenceEndpointError, # noqa: F401
575
+ InferenceEndpointStatus, # noqa: F401
576
+ InferenceEndpointTimeoutError, # noqa: F401
577
+ InferenceEndpointType, # noqa: F401
578
+ )
579
+ from ._login import (
580
+ interpreter_login, # noqa: F401
581
+ login, # noqa: F401
582
+ logout, # noqa: F401
583
+ notebook_login, # noqa: F401
584
+ )
585
+ from ._multi_commits import (
586
+ MultiCommitException, # noqa: F401
587
+ plan_multi_commits, # noqa: F401
588
+ )
589
+ from ._snapshot_download import snapshot_download # noqa: F401
590
+ from ._space_api import (
591
+ SpaceHardware, # noqa: F401
592
+ SpaceRuntime, # noqa: F401
593
+ SpaceStage, # noqa: F401
594
+ SpaceStorage, # noqa: F401
595
+ SpaceVariable, # noqa: F401
596
+ )
597
+ from ._tensorboard_logger import HFSummaryWriter # noqa: F401
598
+ from ._webhooks_payload import (
599
+ WebhookPayload, # noqa: F401
600
+ WebhookPayloadComment, # noqa: F401
601
+ WebhookPayloadDiscussion, # noqa: F401
602
+ WebhookPayloadDiscussionChanges, # noqa: F401
603
+ WebhookPayloadEvent, # noqa: F401
604
+ WebhookPayloadMovedTo, # noqa: F401
605
+ WebhookPayloadRepo, # noqa: F401
606
+ WebhookPayloadUrl, # noqa: F401
607
+ WebhookPayloadWebhook, # noqa: F401
608
+ )
609
+ from ._webhooks_server import (
610
+ WebhooksServer, # noqa: F401
611
+ webhook_endpoint, # noqa: F401
612
+ )
613
+ from .community import (
614
+ Discussion, # noqa: F401
615
+ DiscussionComment, # noqa: F401
616
+ DiscussionCommit, # noqa: F401
617
+ DiscussionEvent, # noqa: F401
618
+ DiscussionStatusChange, # noqa: F401
619
+ DiscussionTitleChange, # noqa: F401
620
+ DiscussionWithDetails, # noqa: F401
621
+ )
622
+ from .constants import (
623
+ CONFIG_NAME, # noqa: F401
624
+ FLAX_WEIGHTS_NAME, # noqa: F401
625
+ HUGGINGFACE_CO_URL_HOME, # noqa: F401
626
+ HUGGINGFACE_CO_URL_TEMPLATE, # noqa: F401
627
+ PYTORCH_WEIGHTS_NAME, # noqa: F401
628
+ REPO_TYPE_DATASET, # noqa: F401
629
+ REPO_TYPE_MODEL, # noqa: F401
630
+ REPO_TYPE_SPACE, # noqa: F401
631
+ TF2_WEIGHTS_NAME, # noqa: F401
632
+ TF_WEIGHTS_NAME, # noqa: F401
633
+ )
634
+ from .fastai_utils import (
635
+ _save_pretrained_fastai, # noqa: F401
636
+ from_pretrained_fastai, # noqa: F401
637
+ push_to_hub_fastai, # noqa: F401
638
+ )
639
+ from .file_download import (
640
+ _CACHED_NO_EXIST, # noqa: F401
641
+ HfFileMetadata, # noqa: F401
642
+ cached_download, # noqa: F401
643
+ get_hf_file_metadata, # noqa: F401
644
+ hf_hub_download, # noqa: F401
645
+ hf_hub_url, # noqa: F401
646
+ try_to_load_from_cache, # noqa: F401
647
+ )
648
+ from .hf_api import (
649
+ Collection, # noqa: F401
650
+ CollectionItem, # noqa: F401
651
+ CommitInfo, # noqa: F401
652
+ CommitOperation, # noqa: F401
653
+ CommitOperationAdd, # noqa: F401
654
+ CommitOperationCopy, # noqa: F401
655
+ CommitOperationDelete, # noqa: F401
656
+ DatasetInfo, # noqa: F401
657
+ GitCommitInfo, # noqa: F401
658
+ GitRefInfo, # noqa: F401
659
+ GitRefs, # noqa: F401
660
+ HfApi, # noqa: F401
661
+ ModelInfo, # noqa: F401
662
+ RepoUrl, # noqa: F401
663
+ SpaceInfo, # noqa: F401
664
+ User, # noqa: F401
665
+ UserLikes, # noqa: F401
666
+ WebhookInfo, # noqa: F401
667
+ WebhookWatchedItem, # noqa: F401
668
+ accept_access_request, # noqa: F401
669
+ add_collection_item, # noqa: F401
670
+         add_space_secret,  # noqa: F401
+         add_space_variable,  # noqa: F401
+         auth_check,  # noqa: F401
+         cancel_access_request,  # noqa: F401
+         change_discussion_status,  # noqa: F401
+         comment_discussion,  # noqa: F401
+         create_branch,  # noqa: F401
+         create_collection,  # noqa: F401
+         create_commit,  # noqa: F401
+         create_commits_on_pr,  # noqa: F401
+         create_discussion,  # noqa: F401
+         create_inference_endpoint,  # noqa: F401
+         create_pull_request,  # noqa: F401
+         create_repo,  # noqa: F401
+         create_tag,  # noqa: F401
+         create_webhook,  # noqa: F401
+         dataset_info,  # noqa: F401
+         delete_branch,  # noqa: F401
+         delete_collection,  # noqa: F401
+         delete_collection_item,  # noqa: F401
+         delete_file,  # noqa: F401
+         delete_folder,  # noqa: F401
+         delete_inference_endpoint,  # noqa: F401
+         delete_repo,  # noqa: F401
+         delete_space_secret,  # noqa: F401
+         delete_space_storage,  # noqa: F401
+         delete_space_variable,  # noqa: F401
+         delete_tag,  # noqa: F401
+         delete_webhook,  # noqa: F401
+         disable_webhook,  # noqa: F401
+         duplicate_space,  # noqa: F401
+         edit_discussion_comment,  # noqa: F401
+         enable_webhook,  # noqa: F401
+         file_exists,  # noqa: F401
+         get_collection,  # noqa: F401
+         get_dataset_tags,  # noqa: F401
+         get_discussion_details,  # noqa: F401
+         get_full_repo_name,  # noqa: F401
+         get_inference_endpoint,  # noqa: F401
+         get_model_tags,  # noqa: F401
+         get_paths_info,  # noqa: F401
+         get_repo_discussions,  # noqa: F401
+         get_safetensors_metadata,  # noqa: F401
+         get_space_runtime,  # noqa: F401
+         get_space_variables,  # noqa: F401
+         get_token_permission,  # noqa: F401
+         get_user_overview,  # noqa: F401
+         get_webhook,  # noqa: F401
+         grant_access,  # noqa: F401
+         like,  # noqa: F401
+         list_accepted_access_requests,  # noqa: F401
+         list_collections,  # noqa: F401
+         list_datasets,  # noqa: F401
+         list_inference_endpoints,  # noqa: F401
+         list_liked_repos,  # noqa: F401
+         list_metrics,  # noqa: F401
+         list_models,  # noqa: F401
+         list_organization_members,  # noqa: F401
+         list_pending_access_requests,  # noqa: F401
+         list_rejected_access_requests,  # noqa: F401
+         list_repo_commits,  # noqa: F401
+         list_repo_files,  # noqa: F401
+         list_repo_likers,  # noqa: F401
+         list_repo_refs,  # noqa: F401
+         list_repo_tree,  # noqa: F401
+         list_spaces,  # noqa: F401
+         list_user_followers,  # noqa: F401
+         list_user_following,  # noqa: F401
+         list_webhooks,  # noqa: F401
+         merge_pull_request,  # noqa: F401
+         model_info,  # noqa: F401
+         move_repo,  # noqa: F401
+         parse_safetensors_file_metadata,  # noqa: F401
+         pause_inference_endpoint,  # noqa: F401
+         pause_space,  # noqa: F401
+         preupload_lfs_files,  # noqa: F401
+         reject_access_request,  # noqa: F401
+         rename_discussion,  # noqa: F401
+         repo_exists,  # noqa: F401
+         repo_info,  # noqa: F401
+         repo_type_and_id_from_hf_id,  # noqa: F401
+         request_space_hardware,  # noqa: F401
+         request_space_storage,  # noqa: F401
+         restart_space,  # noqa: F401
+         resume_inference_endpoint,  # noqa: F401
+         revision_exists,  # noqa: F401
+         run_as_future,  # noqa: F401
+         scale_to_zero_inference_endpoint,  # noqa: F401
+         set_space_sleep_time,  # noqa: F401
+         space_info,  # noqa: F401
+         super_squash_history,  # noqa: F401
+         unlike,  # noqa: F401
+         update_collection_item,  # noqa: F401
+         update_collection_metadata,  # noqa: F401
+         update_inference_endpoint,  # noqa: F401
+         update_repo_settings,  # noqa: F401
+         update_repo_visibility,  # noqa: F401
+         update_webhook,  # noqa: F401
+         upload_file,  # noqa: F401
+         upload_folder,  # noqa: F401
+         upload_large_folder,  # noqa: F401
+         whoami,  # noqa: F401
+     )
+     from .hf_file_system import (
+         HfFileSystem,  # noqa: F401
+         HfFileSystemFile,  # noqa: F401
+         HfFileSystemResolvedPath,  # noqa: F401
+         HfFileSystemStreamFile,  # noqa: F401
+     )
+     from .hub_mixin import (
+         ModelHubMixin,  # noqa: F401
+         PyTorchModelHubMixin,  # noqa: F401
+     )
+     from .inference._client import (
+         InferenceClient,  # noqa: F401
+         InferenceTimeoutError,  # noqa: F401
+     )
+     from .inference._generated._async_client import AsyncInferenceClient  # noqa: F401
+     from .inference._generated.types import (
+         AudioClassificationInput,  # noqa: F401
+         AudioClassificationOutputElement,  # noqa: F401
+         AudioClassificationParameters,  # noqa: F401
+         AudioToAudioInput,  # noqa: F401
+         AudioToAudioOutputElement,  # noqa: F401
+         AutomaticSpeechRecognitionGenerationParameters,  # noqa: F401
+         AutomaticSpeechRecognitionInput,  # noqa: F401
+         AutomaticSpeechRecognitionOutput,  # noqa: F401
+         AutomaticSpeechRecognitionOutputChunk,  # noqa: F401
+         AutomaticSpeechRecognitionParameters,  # noqa: F401
+         ChatCompletionInput,  # noqa: F401
+         ChatCompletionInputFunctionDefinition,  # noqa: F401
+         ChatCompletionInputFunctionName,  # noqa: F401
+         ChatCompletionInputGrammarType,  # noqa: F401
+         ChatCompletionInputMessage,  # noqa: F401
+         ChatCompletionInputMessageChunk,  # noqa: F401
+         ChatCompletionInputTool,  # noqa: F401
+         ChatCompletionInputToolTypeClass,  # noqa: F401
+         ChatCompletionInputURL,  # noqa: F401
+         ChatCompletionOutput,  # noqa: F401
+         ChatCompletionOutputComplete,  # noqa: F401
+         ChatCompletionOutputFunctionDefinition,  # noqa: F401
+         ChatCompletionOutputLogprob,  # noqa: F401
+         ChatCompletionOutputLogprobs,  # noqa: F401
+         ChatCompletionOutputMessage,  # noqa: F401
+         ChatCompletionOutputToolCall,  # noqa: F401
+         ChatCompletionOutputTopLogprob,  # noqa: F401
+         ChatCompletionOutputUsage,  # noqa: F401
+         ChatCompletionStreamOutput,  # noqa: F401
+         ChatCompletionStreamOutputChoice,  # noqa: F401
+         ChatCompletionStreamOutputDelta,  # noqa: F401
+         ChatCompletionStreamOutputDeltaToolCall,  # noqa: F401
+         ChatCompletionStreamOutputFunction,  # noqa: F401
+         ChatCompletionStreamOutputLogprob,  # noqa: F401
+         ChatCompletionStreamOutputLogprobs,  # noqa: F401
+         ChatCompletionStreamOutputTopLogprob,  # noqa: F401
+         DepthEstimationInput,  # noqa: F401
+         DepthEstimationOutput,  # noqa: F401
+         DocumentQuestionAnsweringInput,  # noqa: F401
+         DocumentQuestionAnsweringInputData,  # noqa: F401
+         DocumentQuestionAnsweringOutputElement,  # noqa: F401
+         DocumentQuestionAnsweringParameters,  # noqa: F401
+         FeatureExtractionInput,  # noqa: F401
+         FillMaskInput,  # noqa: F401
+         FillMaskOutputElement,  # noqa: F401
+         FillMaskParameters,  # noqa: F401
+         ImageClassificationInput,  # noqa: F401
+         ImageClassificationOutputElement,  # noqa: F401
+         ImageClassificationParameters,  # noqa: F401
+         ImageSegmentationInput,  # noqa: F401
+         ImageSegmentationOutputElement,  # noqa: F401
+         ImageSegmentationParameters,  # noqa: F401
+         ImageToImageInput,  # noqa: F401
+         ImageToImageOutput,  # noqa: F401
+         ImageToImageParameters,  # noqa: F401
+         ImageToImageTargetSize,  # noqa: F401
+         ImageToTextGenerationParameters,  # noqa: F401
+         ImageToTextInput,  # noqa: F401
+         ImageToTextOutput,  # noqa: F401
+         ImageToTextParameters,  # noqa: F401
+         ObjectDetectionBoundingBox,  # noqa: F401
+         ObjectDetectionInput,  # noqa: F401
+         ObjectDetectionOutputElement,  # noqa: F401
+         ObjectDetectionParameters,  # noqa: F401
+         QuestionAnsweringInput,  # noqa: F401
+         QuestionAnsweringInputData,  # noqa: F401
+         QuestionAnsweringOutputElement,  # noqa: F401
+         QuestionAnsweringParameters,  # noqa: F401
+         SentenceSimilarityInput,  # noqa: F401
+         SentenceSimilarityInputData,  # noqa: F401
+         SummarizationGenerationParameters,  # noqa: F401
+         SummarizationInput,  # noqa: F401
+         SummarizationOutput,  # noqa: F401
+         TableQuestionAnsweringInput,  # noqa: F401
+         TableQuestionAnsweringInputData,  # noqa: F401
+         TableQuestionAnsweringOutputElement,  # noqa: F401
+         Text2TextGenerationInput,  # noqa: F401
+         Text2TextGenerationOutput,  # noqa: F401
+         Text2TextGenerationParameters,  # noqa: F401
+         TextClassificationInput,  # noqa: F401
+         TextClassificationOutputElement,  # noqa: F401
+         TextClassificationParameters,  # noqa: F401
+         TextGenerationInput,  # noqa: F401
+         TextGenerationInputGenerateParameters,  # noqa: F401
+         TextGenerationInputGrammarType,  # noqa: F401
+         TextGenerationOutput,  # noqa: F401
+         TextGenerationOutputBestOfSequence,  # noqa: F401
+         TextGenerationOutputDetails,  # noqa: F401
+         TextGenerationOutputPrefillToken,  # noqa: F401
+         TextGenerationOutputToken,  # noqa: F401
+         TextGenerationStreamOutput,  # noqa: F401
+         TextGenerationStreamOutputStreamDetails,  # noqa: F401
+         TextGenerationStreamOutputToken,  # noqa: F401
+         TextToAudioGenerationParameters,  # noqa: F401
+         TextToAudioInput,  # noqa: F401
+         TextToAudioOutput,  # noqa: F401
+         TextToAudioParameters,  # noqa: F401
+         TextToImageInput,  # noqa: F401
+         TextToImageOutput,  # noqa: F401
+         TextToImageParameters,  # noqa: F401
+         TextToImageTargetSize,  # noqa: F401
+         TokenClassificationInput,  # noqa: F401
+         TokenClassificationOutputElement,  # noqa: F401
+         TokenClassificationParameters,  # noqa: F401
+         TranslationGenerationParameters,  # noqa: F401
+         TranslationInput,  # noqa: F401
+         TranslationOutput,  # noqa: F401
+         VideoClassificationInput,  # noqa: F401
+         VideoClassificationOutputElement,  # noqa: F401
+         VideoClassificationParameters,  # noqa: F401
+         VisualQuestionAnsweringInput,  # noqa: F401
+         VisualQuestionAnsweringInputData,  # noqa: F401
+         VisualQuestionAnsweringOutputElement,  # noqa: F401
+         VisualQuestionAnsweringParameters,  # noqa: F401
+         ZeroShotClassificationInput,  # noqa: F401
+         ZeroShotClassificationInputData,  # noqa: F401
+         ZeroShotClassificationOutputElement,  # noqa: F401
+         ZeroShotClassificationParameters,  # noqa: F401
+         ZeroShotImageClassificationInput,  # noqa: F401
+         ZeroShotImageClassificationInputData,  # noqa: F401
+         ZeroShotImageClassificationOutputElement,  # noqa: F401
+         ZeroShotImageClassificationParameters,  # noqa: F401
+         ZeroShotObjectDetectionBoundingBox,  # noqa: F401
+         ZeroShotObjectDetectionInput,  # noqa: F401
+         ZeroShotObjectDetectionInputData,  # noqa: F401
+         ZeroShotObjectDetectionOutputElement,  # noqa: F401
+     )
+     from .inference_api import InferenceApi  # noqa: F401
+     from .keras_mixin import (
+         KerasModelHubMixin,  # noqa: F401
+         from_pretrained_keras,  # noqa: F401
+         push_to_hub_keras,  # noqa: F401
+         save_pretrained_keras,  # noqa: F401
+     )
+     from .repocard import (
+         DatasetCard,  # noqa: F401
+         ModelCard,  # noqa: F401
+         RepoCard,  # noqa: F401
+         SpaceCard,  # noqa: F401
+         metadata_eval_result,  # noqa: F401
+         metadata_load,  # noqa: F401
+         metadata_save,  # noqa: F401
+         metadata_update,  # noqa: F401
+     )
+     from .repocard_data import (
+         CardData,  # noqa: F401
+         DatasetCardData,  # noqa: F401
+         EvalResult,  # noqa: F401
+         ModelCardData,  # noqa: F401
+         SpaceCardData,  # noqa: F401
+     )
+     from .repository import Repository  # noqa: F401
+     from .serialization import (
+         StateDictSplit,  # noqa: F401
+         get_tf_storage_size,  # noqa: F401
+         get_torch_storage_id,  # noqa: F401
+         get_torch_storage_size,  # noqa: F401
+         save_torch_model,  # noqa: F401
+         save_torch_state_dict,  # noqa: F401
+         split_state_dict_into_shards_factory,  # noqa: F401
+         split_tf_state_dict_into_shards,  # noqa: F401
+         split_torch_state_dict_into_shards,  # noqa: F401
+     )
+     from .utils import (
+         CachedFileInfo,  # noqa: F401
+         CachedRepoInfo,  # noqa: F401
+         CachedRevisionInfo,  # noqa: F401
+         CacheNotFound,  # noqa: F401
+         CorruptedCacheException,  # noqa: F401
+         DeleteCacheStrategy,  # noqa: F401
+         HFCacheInfo,  # noqa: F401
+         HfFolder,  # noqa: F401
+         cached_assets_path,  # noqa: F401
+         configure_http_backend,  # noqa: F401
+         dump_environment_info,  # noqa: F401
+         get_session,  # noqa: F401
+         get_token,  # noqa: F401
+         logging,  # noqa: F401
+         scan_cache_dir,  # noqa: F401
+     )
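
All of the names above are re-exported at the top level of `huggingface_hub`, so user code imports them from the package rather than from the private submodules. A minimal sketch of that usage (the repo id below is hypothetical, for illustration only):

```py
from huggingface_hub import create_repo, upload_file, list_repo_files

# Hypothetical repo id, for illustration only.
repo_id = "my-username/my-test-repo"

create_repo(repo_id, repo_type="model", exist_ok=True)
upload_file(path_or_fileobj=b"hello", path_in_repo="hello.txt", repo_id=repo_id)
print(list_repo_files(repo_id))
```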
valley/lib/python3.10/site-packages/huggingface_hub/_commit_scheduler.py ADDED
@@ -0,0 +1,327 @@
+ import atexit
+ import logging
+ import os
+ import time
+ from concurrent.futures import Future
+ from dataclasses import dataclass
+ from io import SEEK_END, SEEK_SET, BytesIO
+ from pathlib import Path
+ from threading import Lock, Thread
+ from typing import Dict, List, Optional, Union
+
+ from .hf_api import DEFAULT_IGNORE_PATTERNS, CommitInfo, CommitOperationAdd, HfApi
+ from .utils import filter_repo_objects
+
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass(frozen=True)
+ class _FileToUpload:
+     """Temporary dataclass to store info about files to upload. Not meant to be used directly."""
+
+     local_path: Path
+     path_in_repo: str
+     size_limit: int
+     last_modified: float
+
+
+ class CommitScheduler:
+     """
+     Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).
+
+     The scheduler is started when instantiated and runs indefinitely. At the end of your script, a last commit is
+     triggered. Check out the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
+     to learn more about how to use it.
+
+     Args:
+         repo_id (`str`):
+             The id of the repo to commit to.
+         folder_path (`str` or `Path`):
+             Path to the local folder to upload regularly.
+         every (`int` or `float`, *optional*):
+             The number of minutes between each commit. Defaults to 5 minutes.
+         path_in_repo (`str`, *optional*):
+             Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
+             of the repository.
+         repo_type (`str`, *optional*):
+             The type of the repo to commit to. Defaults to `model`.
+         revision (`str`, *optional*):
+             The revision of the repo to commit to. Defaults to `main`.
+         private (`bool`, *optional*):
+             Whether to make the repo private. Defaults to `False`. This value is ignored if the repo already exists.
+         token (`str`, *optional*):
+             The token to use to commit to the repo. Defaults to the token saved on the machine.
+         allow_patterns (`List[str]` or `str`, *optional*):
+             If provided, only files matching at least one pattern are uploaded.
+         ignore_patterns (`List[str]` or `str`, *optional*):
+             If provided, files matching any of the patterns are not uploaded.
+         squash_history (`bool`, *optional*):
+             Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
+             useful to avoid degraded performance on the repo when it grows too large.
+         hf_api (`HfApi`, *optional*):
+             The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
+
+     Example:
+     ```py
+     >>> from pathlib import Path
+     >>> from huggingface_hub import CommitScheduler
+
+     # Scheduler uploads every 10 minutes
+     >>> csv_path = Path("watched_folder/data.csv")
+     >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)
+
+     >>> with csv_path.open("a") as f:
+     ...     f.write("first line")
+
+     # Some time later (...)
+     >>> with csv_path.open("a") as f:
+     ...     f.write("second line")
+     ```
+     """
+
+     def __init__(
+         self,
+         *,
+         repo_id: str,
+         folder_path: Union[str, Path],
+         every: Union[int, float] = 5,
+         path_in_repo: Optional[str] = None,
+         repo_type: Optional[str] = None,
+         revision: Optional[str] = None,
+         private: bool = False,
+         token: Optional[str] = None,
+         allow_patterns: Optional[Union[List[str], str]] = None,
+         ignore_patterns: Optional[Union[List[str], str]] = None,
+         squash_history: bool = False,
+         hf_api: Optional["HfApi"] = None,
+     ) -> None:
+         self.api = hf_api or HfApi(token=token)
+
+         # Folder
+         self.folder_path = Path(folder_path).expanduser().resolve()
+         self.path_in_repo = path_in_repo or ""
+         self.allow_patterns = allow_patterns
+
+         if ignore_patterns is None:
+             ignore_patterns = []
+         elif isinstance(ignore_patterns, str):
+             ignore_patterns = [ignore_patterns]
+         self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS
+
+         if self.folder_path.is_file():
+             raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.")
+         self.folder_path.mkdir(parents=True, exist_ok=True)
+
+         # Repository
+         repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True)
+         self.repo_id = repo_url.repo_id
+         self.repo_type = repo_type
+         self.revision = revision
+         self.token = token
+
+         # Keep track of already uploaded files
+         self.last_uploaded: Dict[Path, float] = {}  # key is local path, value is timestamp
+
+         # Scheduler
+         if not every > 0:
+             raise ValueError(f"'every' must be a positive number, not '{every}'.")
+         self.lock = Lock()
+         self.every = every
+         self.squash_history = squash_history
+
+         logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.")
+         self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
+         self._scheduler_thread.start()
+         atexit.register(self._push_to_hub)
+
+         self.__stopped = False
+
+     def stop(self) -> None:
+         """Stop the scheduler.
+
+         A stopped scheduler cannot be restarted. Mostly for test purposes.
+         """
+         self.__stopped = True
+
+     def _run_scheduler(self) -> None:
+         """Dumb thread waiting between each scheduled push to Hub."""
+         while True:
+             self.last_future = self.trigger()
+             time.sleep(self.every * 60)
+             if self.__stopped:
+                 break
+
+     def trigger(self) -> Future:
+         """Trigger a `push_to_hub` and return a future.
+
+         This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
+         immediately, without waiting for the next scheduled commit.
+         """
+         return self.api.run_as_future(self._push_to_hub)
+
+     def _push_to_hub(self) -> Optional[CommitInfo]:
+         if self.__stopped:  # If stopped, already scheduled commits are ignored
+             return None
+
+         logger.info("(Background) scheduled commit triggered.")
+         try:
+             value = self.push_to_hub()
+             if self.squash_history:
+                 logger.info("(Background) squashing repo history.")
+                 self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
+             return value
+         except Exception as e:
+             logger.error(f"Error while pushing to Hub: {e}")  # Depending on the setup, error might be silenced
+             raise
+
+     def push_to_hub(self) -> Optional[CommitInfo]:
+         """
+         Push folder to the Hub and return the commit info.
+
+         <Tip warning={true}>
+
+         This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
+         queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
+         issues.
+
+         </Tip>
+
+         The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder
+         and uploads only changed files. If no changes are found, the method returns without committing anything. If
+         you want to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can
+         be useful for example to compress data together in a single file before committing. For more details and
+         examples, check out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
+         """
+         # Check files to upload (with lock)
+         with self.lock:
+             logger.debug("Listing files to upload for scheduled commit.")
+
+             # List files from folder (taken from `_prepare_upload_folder_additions`)
+             relpath_to_abspath = {
+                 path.relative_to(self.folder_path).as_posix(): path
+                 for path in sorted(self.folder_path.glob("**/*"))  # sorted to be deterministic
+                 if path.is_file()
+             }
+             prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""
+
+             # Filter with pattern + filter out unchanged files + retrieve current file size
+             files_to_upload: List[_FileToUpload] = []
+             for relpath in filter_repo_objects(
+                 relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
+             ):
+                 local_path = relpath_to_abspath[relpath]
+                 stat = local_path.stat()
+                 if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
+                     files_to_upload.append(
+                         _FileToUpload(
+                             local_path=local_path,
+                             path_in_repo=prefix + relpath,
+                             size_limit=stat.st_size,
+                             last_modified=stat.st_mtime,
+                         )
+                     )
+
+         # Return if nothing to upload
+         if len(files_to_upload) == 0:
+             logger.debug("Dropping scheduled commit: no changed file to upload.")
+             return None
+
+         # Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
+         logger.debug("Removing unchanged files since previous scheduled commit.")
+         add_operations = [
+             CommitOperationAdd(
+                 # Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
+                 path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit),
+                 path_in_repo=file_to_upload.path_in_repo,
+             )
+             for file_to_upload in files_to_upload
+         ]
+
+         # Upload files (append mode expected - no need for lock)
+         logger.debug("Uploading files for scheduled commit.")
+         commit_info = self.api.create_commit(
+             repo_id=self.repo_id,
+             repo_type=self.repo_type,
+             operations=add_operations,
+             commit_message="Scheduled Commit",
+             revision=self.revision,
+         )
+
+         # Successful commit: keep track of the latest "last_modified" for each file
+         for file in files_to_upload:
+             self.last_uploaded[file.local_path] = file.last_modified
+         return commit_info
+
+
+ class PartialFileIO(BytesIO):
+     """A file-like object that reads only the first part of a file.
+
+     Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
+     file is uploaded (i.e. the part that was available when the filesystem was first scanned).
+
+     In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
+     disturbance for the user. The object is passed to `CommitOperationAdd`.
+
+     Only supports `read`, `tell` and `seek` methods.
+
+     Args:
+         file_path (`str` or `Path`):
+             Path to the file to read.
+         size_limit (`int`):
+             The maximum number of bytes to read from the file. If the file is larger than this, only the first part
+             will be read (and uploaded).
+     """
+
+     def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
+         self._file_path = Path(file_path)
+         self._file = self._file_path.open("rb")
+         self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)
+
+     def __del__(self) -> None:
+         self._file.close()
+         return super().__del__()
+
+     def __repr__(self) -> str:
+         return f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
+
+     def __len__(self) -> int:
+         return self._size_limit
+
+     def __getattribute__(self, name: str):
+         if name.startswith("_") or name in ("read", "tell", "seek"):  # only 3 public methods supported
+             return super().__getattribute__(name)
+         raise NotImplementedError(f"PartialFileIO does not support '{name}'.")
+
+     def tell(self) -> int:
+         """Return the current file position."""
+         return self._file.tell()
+
+     def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
+         """Change the stream position to the given offset.
+
+         Behavior is the same as a regular file, except that the position is capped to the size limit.
+         """
+         if __whence == SEEK_END:
+             # SEEK_END => set from the truncated end
+             __offset = len(self) + __offset
+             __whence = SEEK_SET
+
+         pos = self._file.seek(__offset, __whence)
+         if pos > self._size_limit:
+             return self._file.seek(self._size_limit)
+         return pos
+
+     def read(self, __size: Optional[int] = -1) -> bytes:
+         """Read at most `__size` bytes from the file.
+
+         Behavior is the same as a regular file, except that it is capped to the size limit.
+         """
+         current = self._file.tell()
+         if __size is None or __size < 0:
+             # Read until file limit
+             truncated_size = self._size_limit - current
+         else:
+             # Read until file limit or __size
+             truncated_size = min(__size, self._size_limit - current)
+         return self._file.read(truncated_size)
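
The `push_to_hub` docstring above notes that the method can be overridden, e.g. to compress data into a single file before committing. A minimal sketch of that pattern, assuming a hypothetical `ZipScheduler` subclass; every attribute it uses (`self.lock`, `self.folder_path`, `self.api`, `self.repo_id`, `self.repo_type`, `self.revision`) is defined by `CommitScheduler` above:

```py
from io import BytesIO
from zipfile import ZipFile

from huggingface_hub import CommitOperationAdd, CommitScheduler


class ZipScheduler(CommitScheduler):
    """Hypothetical scheduler: zip the watched folder and commit a single archive."""

    def push_to_hub(self):
        # Zip the folder into memory, holding the lock so files are not
        # mutated while being scanned.
        with self.lock:
            archive = BytesIO()
            with ZipFile(archive, "w") as zipf:
                for path in self.folder_path.glob("**/*"):
                    if path.is_file():
                        zipf.write(path, arcname=path.relative_to(self.folder_path))
            archive.seek(0)

        # Commit the archive as a single file (overwriting the previous one).
        return self.api.create_commit(
            repo_id=self.repo_id,
            repo_type=self.repo_type,
            revision=self.revision,
            operations=[CommitOperationAdd(path_in_repo="archive.zip", path_or_fileobj=archive)],
            commit_message="Scheduled Commit (zipped)",
        )
```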
valley/lib/python3.10/site-packages/huggingface_hub/_tensorboard_logger.py ADDED
@@ -0,0 +1,195 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains a logger to push training logs to the Hub, using Tensorboard."""
+
+ from pathlib import Path
+ from typing import TYPE_CHECKING, List, Optional, Union
+
+ from ._commit_scheduler import CommitScheduler
+ from .errors import EntryNotFoundError
+ from .repocard import ModelCard
+ from .utils import experimental
+
+
+ # Depending on user's setup, SummaryWriter can come either from 'tensorboardX'
+ # or from 'torch.utils.tensorboard'. Both are compatible so let's try to load
+ # from either of them.
+ try:
+     from tensorboardX import SummaryWriter
+
+     is_summary_writer_available = True
+
+ except ImportError:
+     try:
+         from torch.utils.tensorboard import SummaryWriter
+
+         is_summary_writer_available = True
+     except ImportError:
+         # Dummy class to avoid failing at import. Will raise on instance creation.
+         SummaryWriter = object
+         is_summary_writer_available = False
+
+ if TYPE_CHECKING:
+     from tensorboardX import SummaryWriter
+
+
+ class HFSummaryWriter(SummaryWriter):
+     """
+     Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub.
+
+     Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate
+     thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection
+     issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`
+     minutes (defaults to every 5 minutes).
+
+     <Tip warning={true}>
+
+     `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.
+
+     </Tip>
+
+     Args:
+         repo_id (`str`):
+             The id of the repo to which the logs will be pushed.
+         logdir (`str`, *optional*):
+             The directory where the logs will be written. If not specified, a local directory will be created by the
+             underlying `SummaryWriter` object.
+         commit_every (`int` or `float`, *optional*):
+             The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.
+         squash_history (`bool`, *optional*):
+             Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
+             useful to avoid degraded performance on the repo when it grows too large.
+         repo_type (`str`, *optional*):
+             The type of the repo to which the logs will be pushed. Defaults to "model".
+         repo_revision (`str`, *optional*):
+             The revision of the repo to which the logs will be pushed. Defaults to "main".
+         repo_private (`bool`, *optional*):
+             Whether to create a private repo or not. Defaults to False. This argument is ignored if the repo already
+             exists.
+         path_in_repo (`str`, *optional*):
+             The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".
+         repo_allow_patterns (`List[str]` or `str`, *optional*):
+             A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the
+             [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
+         repo_ignore_patterns (`List[str]` or `str`, *optional*):
+             A list of patterns to exclude from the upload. Check out the
+             [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
+         token (`str`, *optional*):
+             Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
+             details.
+         kwargs:
+             Additional keyword arguments passed to `SummaryWriter`.
+
+     Examples:
+     ```diff
+     # Taken from https://pytorch.org/docs/stable/tensorboard.html
+     - from torch.utils.tensorboard import SummaryWriter
+     + from huggingface_hub import HFSummaryWriter
+
+     import numpy as np
+
+     - writer = SummaryWriter()
+     + writer = HFSummaryWriter(repo_id="username/my-trained-model")
+
+     for n_iter in range(100):
+         writer.add_scalar('Loss/train', np.random.random(), n_iter)
+         writer.add_scalar('Loss/test', np.random.random(), n_iter)
+         writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
+         writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
+     ```
+
+     ```py
+     >>> from huggingface_hub import HFSummaryWriter
+
+     # Logs are automatically pushed every 15 minutes (5 by default) + when exiting the context manager
+     >>> with HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) as logger:
+     ...     logger.add_scalar("a", 1)
+     ...     logger.add_scalar("b", 2)
+     ```
+     """
+
+     @experimental
+     def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":
+         if not is_summary_writer_available:
+             raise ImportError(
+                 "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"
+                 " tensorboardX` first."
+             )
+         return super().__new__(cls)
+
+     def __init__(
+         self,
+         repo_id: str,
+         *,
+         logdir: Optional[str] = None,
+         commit_every: Union[int, float] = 5,
+         squash_history: bool = False,
+         repo_type: Optional[str] = None,
+         repo_revision: Optional[str] = None,
+         repo_private: bool = False,
+         path_in_repo: Optional[str] = "tensorboard",
+         repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",
+         repo_ignore_patterns: Optional[Union[List[str], str]] = None,
+         token: Optional[str] = None,
+         **kwargs,
+     ):
+         # Initialize SummaryWriter
+         super().__init__(logdir=logdir, **kwargs)
+
+         # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.
+         if not isinstance(self.logdir, str):
+             raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.")
+
+         # Append logdir name to `path_in_repo`
+         if path_in_repo is None or path_in_repo == "":
+             path_in_repo = Path(self.logdir).name
+         else:
+             path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name
+
+         # Initialize scheduler
+         self.scheduler = CommitScheduler(
+             folder_path=self.logdir,
+             path_in_repo=path_in_repo,
+             repo_id=repo_id,
+             repo_type=repo_type,
+             revision=repo_revision,
+             private=repo_private,
+             token=token,
+             allow_patterns=repo_allow_patterns,
+             ignore_patterns=repo_ignore_patterns,
+             every=commit_every,
+             squash_history=squash_history,
+         )
+
+         # Exposing some high-level info at root level
+         self.repo_id = self.scheduler.repo_id
+         self.repo_type = self.scheduler.repo_type
+         self.repo_revision = self.scheduler.revision
+
+         # Add `hf-summary-writer` tag to the model card metadata
+         try:
+             card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type)
+         except EntryNotFoundError:
+             card = ModelCard("")
+         tags = card.data.get("tags", [])
+         if "hf-summary-writer" not in tags:
+             tags.append("hf-summary-writer")
+             card.data["tags"] = tags
+             card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type)
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Push to hub in a non-blocking way when exiting the logger's context manager."""
+         super().__exit__(exc_type, exc_val, exc_tb)
+         future = self.scheduler.trigger()
+         future.result()
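
Since `HFSummaryWriter` exposes its `CommitScheduler` as `self.scheduler`, a push can also be forced without waiting for the next scheduled commit. A minimal sketch using only the attributes defined above (the repo id is hypothetical):

```py
from huggingface_hub import HFSummaryWriter

writer = HFSummaryWriter(repo_id="my-username/test_hf_logger", commit_every=5)
writer.add_scalar("loss", 0.42, global_step=1)

# Force an immediate, non-blocking push and wait for it to finish.
future = writer.scheduler.trigger()
future.result()  # re-raises if the background upload failed
```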
valley/lib/python3.10/site-packages/huggingface_hub/_webhooks_server.py ADDED
@@ -0,0 +1,386 @@
+ # coding=utf-8
+ # Copyright 2023-present, the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily."""
+
+ import atexit
+ import inspect
+ import os
+ from functools import wraps
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
+
+ from .utils import experimental, is_fastapi_available, is_gradio_available
+
+
+ if TYPE_CHECKING:
+     import gradio as gr
+     from fastapi import Request
+
+ if is_fastapi_available():
+     from fastapi import FastAPI, Request
+     from fastapi.responses import JSONResponse
+ else:
+     # Will fail at runtime if FastAPI is not available
+     FastAPI = Request = JSONResponse = None  # type: ignore [misc, assignment]
+
+
+ _global_app: Optional["WebhooksServer"] = None
+ _is_local = os.environ.get("SPACE_ID") is None
+
+
+ @experimental
+ class WebhooksServer:
+     """
+     The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks.
+     These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added
+     to the app as POST endpoints on the FastAPI router. Once all the webhooks are registered, the `launch` method has
+     to be called to start the app.
+
+     It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic
+     model that contains all the information about the webhook event. The data will be parsed automatically for you.
+
+     Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
+     WebhooksServer and deploy it on a Space.
+
+     <Tip warning={true}>
+
+     `WebhooksServer` is experimental. Its API is subject to change in the future.
+
+     </Tip>
+
+     <Tip warning={true}>
+
+     You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`).
+
+     </Tip>
+
+     Args:
+         ui (`gradio.Blocks`, optional):
+             A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions
+             about the configured webhooks is created.
+         webhook_secret (`str`, optional):
+             A secret key to verify incoming webhook requests. You can set this value to any secret you want as long
+             as you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You
+             can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the
+             webhook endpoints are opened without any security.
+
+     Example:
+
+     ```python
+     import gradio as gr
+     from huggingface_hub import WebhooksServer, WebhookPayload
+
+     with gr.Blocks() as ui:
+         ...
+
+     app = WebhooksServer(ui=ui, webhook_secret="my_secret_key")
+
+     @app.add_webhook("/say_hello")
+     async def hello(payload: WebhookPayload):
+         return {"message": "hello"}
+
+     app.launch()
+     ```
+     """
+
+     def __new__(cls, *args, **kwargs) -> "WebhooksServer":
+         if not is_gradio_available():
+             raise ImportError(
+                 "You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio`"
+                 " first."
+             )
+         if not is_fastapi_available():
+             raise ImportError(
+                 "You must have `fastapi` installed to use `WebhooksServer`. Please run `pip install --upgrade fastapi`"
+                 " first."
+             )
+         return super().__new__(cls)
+
+     def __init__(
+         self,
+         ui: Optional["gr.Blocks"] = None,
+         webhook_secret: Optional[str] = None,
+     ) -> None:
+         self._ui = ui
+
+         self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET")
+         self.registered_webhooks: Dict[str, Callable] = {}
+         _warn_on_empty_secret(self.webhook_secret)
+
+     def add_webhook(self, path: Optional[str] = None) -> Callable:
+         """
+         Decorator to add a webhook to the [`WebhooksServer`] server.
+
+         Args:
+             path (`str`, optional):
+                 The URL path to register the webhook function. If not provided, the function name will be used as the
+                 path. In any case, all webhooks are registered under `/webhooks`.
+
+         Raises:
+             ValueError: If the provided path is already registered as a webhook.
+
+         Example:
+         ```python
+         from huggingface_hub import WebhooksServer, WebhookPayload
+
+         app = WebhooksServer()
+
+         @app.add_webhook
+         async def trigger_training(payload: WebhookPayload):
+             if payload.repo.type == "dataset" and payload.event.action == "update":
+                 # Trigger a training job if a dataset is updated
+                 ...
+
+         app.launch()
+         ```
+         """
+         # Usage: directly as decorator. Example: `@app.add_webhook`
+         if callable(path):
+             # If path is a function, it means it was used as a decorator without arguments
+             return self.add_webhook()(path)
+
+         # Usage: provide a path. Example: `@app.add_webhook(...)`
+         @wraps(FastAPI.post)
+         def _inner_post(*args, **kwargs):
+             func = args[0]
+             abs_path = f"/webhooks/{(path or func.__name__).strip('/')}"
+             if abs_path in self.registered_webhooks:
+                 raise ValueError(f"Webhook {abs_path} already exists.")
+             self.registered_webhooks[abs_path] = func
+
+         return _inner_post
+
+     def launch(self, prevent_thread_lock: bool = False, **launch_kwargs: Any) -> None:
+         """Launch the Gradio app and register webhooks to the underlying FastAPI server.
+
+         Input parameters are forwarded to Gradio when launching the app.
+         """
+         ui = self._ui or self._get_default_ui()
+
+         # Start Gradio App
+         # - as non-blocking so that webhooks can be added afterwards
+         # - as shared if launched locally (to debug webhooks)
+         launch_kwargs.setdefault("share", _is_local)
+         self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, **launch_kwargs)
+
+         # Register webhooks to FastAPI app
+         for path, func in self.registered_webhooks.items():
+             # Add secret check if required
+             if self.webhook_secret is not None:
+                 func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret)
+
+             # Add route to FastAPI app
+             self.fastapi_app.post(path)(func)
+
+         # Print instructions and block main thread
+         space_host = os.environ.get("SPACE_HOST")
+         url = "https://" + space_host if space_host is not None else (ui.share_url or ui.local_url)
+         url = url.strip("/")
+         message = "\nWebhooks are correctly set up and ready to use:"
+         message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks)
+         message += "\nGo to https://huggingface.co/settings/webhooks to set up your webhooks."
+         print(message)
+
+         if not prevent_thread_lock:
+             ui.block_thread()
+
+     def _get_default_ui(self) -> "gr.Blocks":
+         """Default UI if not provided (lists webhooks and provides basic instructions)."""
+         import gradio as gr
+
+         with gr.Blocks() as ui:
+             gr.Markdown("# This is an app to process 🤗 Webhooks")
+             gr.Markdown(
+                 "Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on"
+                 " specific repos or to all repos belonging to a particular set of users/organizations (not just your"
+                 " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to get to"
+                 " know more about webhooks on the Huggingface Hub."
+             )
+             gr.Markdown(
+                 f"{len(self.registered_webhooks)} webhook(s) are registered:"
+                 + "\n\n"
+                 + "\n ".join(
+                     f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})"
+                     for webhook_path, webhook in self.registered_webhooks.items()
+                 )
+             )
+             gr.Markdown(
+                 "Go to https://huggingface.co/settings/webhooks to set up your webhooks."
+                 + "\nYour app is running locally. Please look at the logs to check the full URL you need to set."
+                 if _is_local
+                 else (
+                     "\nThis app is running on a Space. You can find the corresponding URL in the options menu"
+                     " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'."
+                 )
+             )
+         return ui
+
+
+ @experimental
+ def webhook_endpoint(path: Optional[str] = None) -> Callable:
+     """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint.
+
+     This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret),
+     you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by
+     using this decorator multiple times.
+
+     Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
+     server and deploy it on a Space.
+
+     <Tip warning={true}>
+
+     `webhook_endpoint` is experimental. Its API is subject to change in the future.
+
+     </Tip>
+
+     <Tip warning={true}>
+
+     You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`).
+
+     </Tip>
+
+     Args:
+         path (`str`, optional):
+             The URL path to register the webhook function. If not provided, the function name will be used as the
+             path. In any case, all webhooks are registered under `/webhooks`.
+
+     Examples:
+         The default usage is to register a function as a webhook endpoint. The function name will be used as the
+         path. The server will be started automatically at exit (i.e. at the end of the script).
+
+         ```python
+         from huggingface_hub import webhook_endpoint, WebhookPayload
+
+         @webhook_endpoint
+         async def trigger_training(payload: WebhookPayload):
+             if payload.repo.type == "dataset" and payload.event.action == "update":
+                 # Trigger a training job if a dataset is updated
+                 ...
+
+         # Server is automatically started at the end of the script.
+         ```
+
+         Advanced usage: register a function as a webhook endpoint and start the server manually. This is useful if
+         you are running it in a notebook.
+
+         ```python
+         from huggingface_hub import webhook_endpoint, WebhookPayload
+
+         @webhook_endpoint
+         async def trigger_training(payload: WebhookPayload):
+             if payload.repo.type == "dataset" and payload.event.action == "update":
+                 # Trigger a training job if a dataset is updated
+                 ...
+
+         # Start the server manually
+         trigger_training.launch()
+         ```
+     """
+     if callable(path):
+         # If path is a function, it means it was used as a decorator without arguments
+         return webhook_endpoint()(path)
+
+     @wraps(WebhooksServer.add_webhook)
+     def _inner(func: Callable) -> Callable:
+         app = _get_global_app()
+         app.add_webhook(path)(func)
+         if len(app.registered_webhooks) == 1:
+             # Register `app.launch` to run at exit (only once)
+             atexit.register(app.launch)
+
+         @wraps(app.launch)
+         def _launch_now():
+             # Run the app directly (without waiting atexit)
+             atexit.unregister(app.launch)
+             app.launch()
+
+         func.launch = _launch_now  # type: ignore
+         return func
+
+     return _inner
+
+
+ def _get_global_app() -> WebhooksServer:
+     global _global_app
+     if _global_app is None:
+         _global_app = WebhooksServer()
+     return _global_app
+
+
+ def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None:
+     if webhook_secret is None:
+         print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.")
+         print(
+             "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: "
+             "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`"
+         )
+         print(
+             "For more details about webhook secrets, please refer to"
+             " https://huggingface.co/docs/hub/webhooks#webhook-secret."
+         )
+     else:
+         print("Webhook secret is correctly defined.")
+
+
+ def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str:
+     """Returns the anchor to a given webhook in the docs (experimental)"""
+     return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post"
+
+
+ def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable:
+     """Wraps a webhook function to check the webhook secret before calling the function.
+
+     This is a hacky way to add the `request` parameter to the function signature. Since FastAPI relies on route
+     parameters to inject values into the function, we need to hack the function signature to retrieve the `Request`
+     object (and hence the headers). A far cleaner solution would be to use a middleware. However, since
+     `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by
+     Gradio internals (and not by us), we cannot add a middleware.
+
+     This method is called only when a secret has been defined by the user. If a request is sent without the
+     "x-webhook-secret" header, the function will return a 401 error (unauthorized). If the header is sent but is
+     incorrect, the function will return a 403 error (forbidden).
+
+     Inspired by https://stackoverflow.com/a/33112180.
+     """
+     initial_sig = inspect.signature(func)
+
+     @wraps(func)
+     async def _protected_func(request: Request, **kwargs):
+         request_secret = request.headers.get("x-webhook-secret")
+         if request_secret is None:
+             return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401)
+         if request_secret != webhook_secret:
+             return JSONResponse({"error": "Invalid webhook secret."}, status_code=403)
+
+         # Inject `request` in kwargs if required
+         if "request" in initial_sig.parameters:
+             kwargs["request"] = request
+
+         # Handle both sync and async routes
+         if inspect.iscoroutinefunction(func):
+             return await func(**kwargs)
+         else:
+             return func(**kwargs)
+
+     # Update signature to include request
+     if "request" not in initial_sig.parameters:
+         _protected_func.__signature__ = initial_sig.replace(  # type: ignore
+             parameters=(
+                 inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request),
+             )
+             + tuple(initial_sig.parameters.values())
+         )
+
+     # Return protected route
+     return _protected_func
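
A client-side sketch of the secret check implemented by `_wrap_webhook_to_check_secret` above: a request without the `x-webhook-secret` header is rejected with a 401, a wrong secret with a 403, and the correct secret reaches the endpoint. The URL and secret below are hypothetical, for illustration only:

```py
import requests

# Hypothetical Space endpoint and secret.
url = "https://my-username-my-space.hf.space/webhooks/say_hello"

# No header -> 401 (unauthorized)
print(requests.post(url, json={}).status_code)

# Wrong secret -> 403 (forbidden)
print(requests.post(url, json={}, headers={"x-webhook-secret": "wrong"}).status_code)

# Correct secret -> the webhook function runs
print(requests.post(url, json={}, headers={"x-webhook-secret": "my_secret_key"}).status_code)
```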