davidtran999 commited on
Commit
3acdc71
·
verified ·
1 Parent(s): d3a74f0

Upload backend/venv/lib/python3.10/site-packages/diskcache/recipes.py with huggingface_hub

Browse files
backend/venv/lib/python3.10/site-packages/diskcache/recipes.py ADDED
@@ -0,0 +1,488 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Disk Cache Recipes
2
+ """
3
+
4
+ import functools
5
+ import math
6
+ import os
7
+ import random
8
+ import threading
9
+ import time
10
+
11
+ from .core import ENOVAL, args_to_key, full_name
12
+
13
+
14
class Averager:
    """Recipe for maintaining a running average in a cache.

    Sometimes known as "online statistics," the running average keeps a
    ``(total, count)`` pair under a single key so the mean can be computed
    at any time without storing every sample.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.FanoutCache()
    >>> ave = Averager(cache, 'latency')
    >>> ave.add(0.080)
    >>> ave.add(0.120)
    >>> ave.get()
    0.1
    >>> ave.add(0.160)
    >>> ave.pop()
    0.12
    >>> print(ave.get())
    None

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def add(self, value):
        """Add `value` to average."""
        # Read-modify-write under a transaction so concurrent adders cannot
        # lose updates.
        with self._cache.transact(retry=True):
            total, count = self._cache.get(self._key, default=(0.0, 0))
            self._cache.set(
                self._key,
                (total + value, count + 1),
                expire=self._expire,
                tag=self._tag,
            )

    def get(self):
        """Get current average or return `None` if count equals zero."""
        total, count = self._cache.get(self._key, default=(0.0, 0), retry=True)
        return total / count if count else None

    def pop(self):
        """Return current average and delete key."""
        total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True)
        return total / count if count else None
66
+
67
+
68
class Lock:
    """Recipe for cross-process and cross-thread lock.

    The lock is represented by the mere existence of the key: ``acquire``
    spins until ``Cache.add`` succeeds, and ``release`` deletes the key.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> lock = Lock(cache, 'report-123')
    >>> lock.acquire()
    >>> lock.release()
    >>> with lock:
    ...     pass

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def acquire(self):
        """Acquire lock using spin-lock algorithm."""
        while True:
            # `add` is atomic: it succeeds only when the key is absent.
            added = self._cache.add(
                self._key,
                None,
                expire=self._expire,
                tag=self._tag,
                retry=True,
            )
            if added:
                break
            time.sleep(0.001)  # Back off briefly before retrying.

    def release(self):
        """Release lock by deleting key."""
        self._cache.delete(self._key, retry=True)

    def locked(self):
        """Return true if the lock is acquired."""
        return self._key in self._cache

    def __enter__(self):
        self.acquire()
        # Return the lock so `with lock as held:` binds a useful value
        # (previously bound None), matching context-manager convention.
        return self

    def __exit__(self, *exc_info):
        self.release()
117
+
118
+
119
class RLock:
    """Recipe for cross-process and cross-thread re-entrant lock.

    Ownership is tracked as a ``(owner, depth)`` pair, where the owner is a
    ``"pid-tid"`` string; the same process/thread may acquire repeatedly.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> rlock = RLock(cache, 'user-123')
    >>> rlock.acquire()
    >>> rlock.acquire()
    >>> rlock.release()
    >>> with rlock:
    ...     pass
    >>> rlock.release()
    >>> rlock.release()
    Traceback (most recent call last):
      ...
    AssertionError: cannot release un-acquired lock

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def acquire(self):
        """Acquire lock by incrementing count using spin-lock algorithm."""
        me = '{}-{}'.format(os.getpid(), threading.get_ident())

        while True:
            with self._cache.transact(retry=True):
                owner, depth = self._cache.get(self._key, default=(None, 0))
                # Free, or already held by this process/thread: take it.
                if depth == 0 or owner == me:
                    self._cache.set(
                        self._key,
                        (me, depth + 1),
                        expire=self._expire,
                        tag=self._tag,
                    )
                    return
            time.sleep(0.001)

    def release(self):
        """Release lock by decrementing count."""
        me = '{}-{}'.format(os.getpid(), threading.get_ident())

        with self._cache.transact(retry=True):
            owner, depth = self._cache.get(self._key, default=(None, 0))
            assert owner == me and depth > 0, 'cannot release un-acquired lock'
            self._cache.set(
                self._key,
                (owner, depth - 1),
                expire=self._expire,
                tag=self._tag,
            )

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc_info):
        self.release()
188
+
189
+
190
class BoundedSemaphore:
    """Recipe for cross-process and cross-thread bounded semaphore.

    The key stores the number of remaining slots, starting at ``value``;
    ``acquire`` decrements it and ``release`` increments it, never past the
    initial bound.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
    >>> semaphore.acquire()
    >>> semaphore.acquire()
    >>> semaphore.release()
    >>> with semaphore:
    ...     pass
    >>> semaphore.release()
    >>> semaphore.release()
    Traceback (most recent call last):
      ...
    AssertionError: cannot release un-acquired semaphore

    """

    def __init__(self, cache, key, value=1, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._value = value
        self._expire = expire
        self._tag = tag

    def acquire(self):
        """Acquire semaphore by decrementing value using spin-lock algorithm."""
        while True:
            with self._cache.transact(retry=True):
                available = self._cache.get(self._key, default=self._value)
                if available > 0:
                    self._cache.set(
                        self._key,
                        available - 1,
                        expire=self._expire,
                        tag=self._tag,
                    )
                    return
            time.sleep(0.001)

    def release(self):
        """Release semaphore by incrementing value."""
        with self._cache.transact(retry=True):
            available = self._cache.get(self._key, default=self._value)
            # Releasing when all slots are free would exceed the bound.
            assert self._value > available, 'cannot release un-acquired semaphore'
            self._cache.set(
                self._key,
                available + 1,
                expire=self._expire,
                tag=self._tag,
            )

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc_info):
        self.release()
252
+
253
+
254
def throttle(
    cache,
    count,
    seconds,
    name=None,
    expire=None,
    tag=None,
    time_func=time.time,
    sleep_func=time.sleep,
):
    """Decorator to throttle calls to function.

    Token-bucket style rate limiting: the key stores ``(last, tally)`` and
    tokens refill at ``count / seconds`` per second. Each call spends one
    token or sleeps until one is available.

    Assumes keys will not be evicted. Set the eviction policy to 'none' on the
    cache to guarantee the keys are not evicted.

    >>> import diskcache, time
    >>> cache = diskcache.Cache()
    >>> count = 0
    >>> @throttle(cache, 2, 1)  # 2 calls per 1 second
    ... def increment():
    ...     global count
    ...     count += 1
    >>> start = time.time()
    >>> while (time.time() - start) <= 2:
    ...     increment()
    >>> count in (6, 7)  # 6 or 7 calls depending on CPU load
    True

    """

    def decorator(func):
        rate = count / float(seconds)
        key = full_name(func) if name is None else name
        now = time_func()
        cache.set(key, (now, count), expire=expire, tag=tag, retry=True)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                with cache.transact(retry=True):
                    last, tally = cache.get(key)
                    now = time_func()
                    tally += (now - last) * rate
                    delay = 0

                    # Pass expire/tag on every update so the key keeps the
                    # tag applied at decoration time (previously the tag was
                    # dropped on the first wrapped call).
                    if tally > count:
                        # Cap the bucket at `count` tokens and spend one.
                        cache.set(key, (now, count - 1), expire=expire, tag=tag)
                    elif tally >= 1:
                        cache.set(key, (now, tally - 1), expire=expire, tag=tag)
                    else:
                        # Not enough tokens; wait for the next full token.
                        delay = (1 - tally) / rate

                if delay:
                    sleep_func(delay)
                else:
                    break

            return func(*args, **kwargs)

        return wrapper

    return decorator
316
+
317
+
318
def barrier(cache, lock_factory, name=None, expire=None, tag=None):
    """Barrier to calling decorated function.

    Supports different kinds of locks: Lock, RLock, BoundedSemaphore. A
    single shared lock instance guards every call to the wrapped function.

    Assumes keys will not be evicted. Set the eviction policy to 'none' on the
    cache to guarantee the keys are not evicted.

    >>> import diskcache, time
    >>> cache = diskcache.Cache()
    >>> @barrier(cache, Lock)
    ... def work(num):
    ...     print('worker started')
    ...     time.sleep(1)
    ...     print('worker finished')
    >>> import multiprocessing.pool
    >>> pool = multiprocessing.pool.ThreadPool(2)
    >>> _ = pool.map(work, range(2))
    worker started
    worker finished
    worker started
    worker finished
    >>> pool.terminate()

    """

    def decorator(func):
        # Default the lock key to the function's fully-qualified name.
        key = full_name(func) if name is None else name
        guard = lock_factory(cache, key, expire=expire, tag=tag)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with guard:
                return func(*args, **kwargs)

        return wrapper

    return decorator
356
+
357
+
358
def memoize_stampede(
    cache, expire, name=None, typed=False, tag=None, beta=1, ignore=()
):
    """Memoizing cache decorator with cache stampede protection.

    Cache stampedes are a type of system overload that can occur when parallel
    computing systems using memoization come under heavy load. This behaviour
    is sometimes also called dog-piling, cache miss storm, cache choking, or
    the thundering herd problem.

    The memoization decorator implements cache stampede protection through
    early recomputation. Early recomputation of function results will occur
    probabilistically before expiration in a background thread of
    execution. Early probabilistic recomputation is based on research by
    Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
    Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097

    If name is set to None (default), the callable name will be determined
    automatically.

    If typed is set to True, function arguments of different types will be
    cached separately. For example, f(3) and f(3.0) will be treated as distinct
    calls with distinct results.

    The original underlying function is accessible through the `__wrapped__`
    attribute. This is useful for introspection, for bypassing the cache, or
    for rewrapping the function with a different cache.

    >>> from diskcache import Cache
    >>> cache = Cache()
    >>> @memoize_stampede(cache, expire=1)
    ... def fib(number):
    ...     if number == 0:
    ...         return 0
    ...     elif number == 1:
    ...         return 1
    ...     else:
    ...         return fib(number - 1) + fib(number - 2)
    >>> print(fib(100))
    354224848179261915075

    An additional `__cache_key__` attribute can be used to generate the cache
    key used for the given arguments.

    >>> key = fib.__cache_key__(100)
    >>> del cache[key]

    Remember to call memoize when decorating a callable. If you forget, then a
    TypeError will occur.

    :param cache: cache to store callable arguments and return values
    :param float expire: seconds until arguments expire
    :param str name: name given for callable (default None, automatic)
    :param bool typed: cache different types separately (default False)
    :param str tag: text to associate with arguments (default None)
    :param set ignore: positional or keyword args to ignore (default ())
    :return: callable decorator

    """
    # Caution: Nearly identical code exists in Cache.memoize
    def decorator(func):
        """Decorator created by memoize call for callable."""
        base = (full_name(func),) if name is None else (name,)

        def timed_call(*args, **kwargs):
            """Run `func`, returning its result and elapsed wall time."""
            begin = time.time()
            result = func(*args, **kwargs)
            return result, time.time() - begin

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Wrapper for callable to cache arguments and return values."""
            key = wrapper.__cache_key__(*args, **kwargs)
            pair, expire_time = cache.get(
                key,
                default=ENOVAL,
                expire_time=True,
                retry=True,
            )

            if pair is not ENOVAL:
                result, elapsed = pair
                ttl = expire_time - time.time()

                # Probabilistic early expiration (Vattani et al., 2015):
                # the chance of recomputing early grows as expiry nears and
                # as the computation (`elapsed`) gets more expensive.
                if (-elapsed * beta * math.log(random.random())) < ttl:
                    return result  # Cache hit.

                # Use an `add` on a sentinel key so that at most one caller
                # starts the early-recomputation thread.
                thread_key = key + (ENOVAL,)
                thread_added = cache.add(
                    thread_key,
                    None,
                    expire=elapsed,
                    retry=True,
                )

                if thread_added:
                    # Recompute in the background; callers keep getting the
                    # stale value until the fresh one lands.
                    def recompute():
                        with cache:
                            fresh = timed_call(*args, **kwargs)
                            cache.set(
                                key,
                                fresh,
                                expire=expire,
                                tag=tag,
                                retry=True,
                            )

                    worker = threading.Thread(target=recompute)
                    worker.daemon = True
                    worker.start()

                return result

            # Cache miss: compute synchronously and store the timed pair.
            pair = timed_call(*args, **kwargs)
            cache.set(key, pair, expire=expire, tag=tag, retry=True)
            return pair[0]

        def __cache_key__(*args, **kwargs):
            """Make key for cache given function arguments."""
            return args_to_key(base, args, kwargs, typed, ignore)

        wrapper.__cache_key__ = __cache_key__
        return wrapper

    return decorator