diff --git a/.gitattributes b/.gitattributes index 289fab4aab982e9a15464190c978edbefcbc4e09..266bf86563e54c8ef63c83b714a83df724590b52 100644 --- a/.gitattributes +++ b/.gitattributes @@ -255,3 +255,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_ssl.cpython- my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_testbuffer.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_sqlite3.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/tkinter/__pycache__/__init__.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_socket.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_ctypes.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/__pycache__/autopep8.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_asyncio.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_asyncio.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_asyncio.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..22efa4b8b085bb30e6384eeede6241f263dac631 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_asyncio.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7805a83a6295ab6cccb70abde69a7ac28d986b943f490387cb7948ff8033bb36 +size 225344 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_ctypes.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_ctypes.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..452cc2a4bdafdf3a73dbfa172add3563893f253d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_ctypes.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbef76d31c5ef234af2c4686fc45de37fc51bb4ade60c77a893179db581ad297 +size 513728 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_socket.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_socket.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e671e1c8ee8bd8072c4b0f9d5fadd480ca0da347 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib-dynload/_socket.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f5cd0ce9e6b42fc744a208bde0d6c23947cae12cd94493072414d85cd38a86d +size 283768 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar3.8.18.final.0.pickle b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar3.8.18.final.0.pickle new file mode 100644 index 0000000000000000000000000000000000000000..56f2ba639beb4cb82f6dd8df2cc70667429c4f06 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/lib2to3/Grammar3.8.18.final.0.pickle @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3131a0354ebd272fdb9f419a5045a4e814b8d28b4482f7ba2874eb6a1d4fc228 +size 15309 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/__pycache__/autopep8.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/__pycache__/autopep8.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..089a37667000033159310e24dc10fcad4fd78b8f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/__pycache__/autopep8.cpython-38.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae195c0994a3df4625694e123f01da6ad6437932d70ddf7659a391631ae1e611 +size 100765 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2c19c06fe14476a9bfa4f1f60de7a997a41191c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +import sys + +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] + +def __getattr__(name): + import importlib + + if name in __all__: + return importlib.import_module("." + name, __name__) + raise AttributeError( + "module {!r} has no attribute {!r}".format(__name__, name) + ) + + +def __dir__(): + # __dir__ should include all the lazy-importable modules as well. + return [x for x in globals() if x not in sys.modules] + __all__ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_common.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..4eb2659bd2986125fcfb4afea5bae9efc2dcd1a0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules.
+""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ddda98098527a73348e694c2edb691fd625475fc --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.9.0.post0' +__version_tuple__ = version_tuple = (2, 9, 0) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/easter.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/easter.py new file mode 100644 index 0000000000000000000000000000000000000000..f74d1f7442473997245ac683b8a269a3574d1ba4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic Easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different Easter + calculation methods: + + 1. Original calculation in Julian calendar, valid in + dates after 326 AD + 2. Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3. Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. 
+ + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_ + + and + + `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/relativedelta.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/relativedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..cd323a549e0f182541ebcde2d2ea1adfbbd9701e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/relativedelta.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is designed to be applied to an existing datetime and + can replace specific components of that datetime, or represents an interval + of time. + + It is based on the specification of the excellent work done by M.-A. Lemburg + in his + `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding arithmetic operation on the original datetime value + with the information in the relativedelta.
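A minimal doctest of the replace-versus-add distinction described above (editor's sketch, not part of the upstream file):

    >>> from datetime import datetime
    >>> from dateutil.relativedelta import relativedelta
    >>> dt = datetime(2020, 1, 15, 8, 0)
    >>> dt + relativedelta(hour=10)    # singular: REPLACES the hour
    datetime.datetime(2020, 1, 15, 10, 0)
    >>> dt + relativedelta(hours=10)   # plural: adds ten hours
    datetime.datetime(2020, 1, 15, 18, 0)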
+ + weekday: + One of the weekday instances (MO, TU, etc) available in the + relativedelta module. These instances may receive a parameter N, + specifying the Nth weekday, which could be positive or negative + (like MO(+1) or MO(-2)). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. This argument is always + relative e.g. if the calculated date is already Monday, using MO(1) + or MO(-1) won't change the day. To effectively make it absolute, use + it in combination with the day argument (e.g. day=1, MO(1) for first + Monday of the month). + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> from datetime import datetime + >>> from dateutil.relativedelta import relativedelta, MO + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + >>> dt + delta + datetime.datetime(2018, 4, 2, 14, 37) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=+1, hours=+14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += 
datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + 
self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/rrule.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/rrule.py new file mode 100644 index 0000000000000000000000000000000000000000..571a0d2bc886a7ea4c06196b2f52e740c2ed6e9f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/rrule.py @@ -0,0 +1,1737 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_, +including support for caching of results. +""" +import calendar +import datetime +import heapq +import itertools +import re +import sys +from functools import wraps +# For warning about deprecation of until and count +from warnings import warn + +from six import advance_iterator, integer_types + +from six.moves import _thread, range + +from ._common import weekday as weekdaybase + +try: + from math import gcd +except ImportError: + from fractions import gcd + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods.
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. + """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. + """ + @wraps(f) + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penalty. 
+ def count(self): + """ Returns the number of recurrences in this set. It will have to go + through the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set.
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is + retrieved from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + If given, this determines how many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param until: + If given, this must be a datetime instance specifying the upper-bound + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to.
Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. 
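For example (editor's illustrative sketch, not part of the upstream file; the output assumes the default calendar.firstweekday() == 0, so no WKST part is emitted):

    >>> from datetime import datetime
    >>> from dateutil.rrule import rrule, WEEKLY, MO, WE
    >>> print(rrule(WEEKLY, byweekday=(MO, WE), count=4,
    ...             dtstart=datetime(2020, 1, 6)))
    DTSTART:20200106T000000
    RRULE:FREQ=WEEKLY;COUNT=4;BYDAY=MO,WE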
+ """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in 
bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + 
rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. 
+ + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
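+
+    A minimal usage sketch (``rrule`` and ``DAILY`` are defined in this
+    module):
+
+        >>> import datetime
+        >>> rset = rruleset()
+        >>> rset.rrule(rrule(DAILY, count=3,
+        ...                  dtstart=datetime.datetime(2020, 1, 1)))
+        >>> rset.exdate(datetime.datetime(2020, 1, 2))
+        >>> list(rset)  # doctest: +SKIP
+        [datetime.datetime(2020, 1, 1, 0, 0), datetime.datetime(2020, 1, 3, 0, 0)]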
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + + + +class _rrulestr(object): + """ Parses a string representation of a recurrence rule or set of + recurrence rules. + + :param s: + Required, a string defining one or more recurrence rules. + + :param dtstart: + If given, used as the default recurrence start if not specified in the + rule string. + + :param cache: + If set ``True`` caching of results will be enabled, improving + performance of multiple queries considerably. + + :param unfold: + If set ``True`` indicates that a rule string is split over more + than one line and should be joined before processing. + + :param forceset: + If set ``True`` forces a :class:`dateutil.rrule.rruleset` to + be returned. + + :param compatible: + If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime.datetime` object is returned. 
+ + :param tzids: + If given, a callable or mapping used to retrieve a + :class:`datetime.tzinfo` from a string representation. + Defaults to :func:`dateutil.tz.gettz`. + + :param tzinfos: + Additional time zone names / aliases which may be present in a string + representation. See :func:`dateutil.parser.parse` for more + information. + + :return: + Returns a :class:`dateutil.rrule.rruleset` or + :class:`dateutil.rrule.rrule` + """ + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_date_value(self, date_value, parms, rule_tzids, + ignoretz, tzids, tzinfos): + global parser + if not parser: + from dateutil import parser + + datevals = [] + value_found = False + TZID = None + + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = rule_tzids[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . 
import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, mapping, or None, ' + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + + # RFC 5545 3.8.2.4: The VALUE parameter is optional, but may be found + # only once. + if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: + raise ValueError("unsupported parm: " + parm) + else: + if value_found: + msg = ("Duplicate value parameter found in: " + parm) + raise ValueError(msg) + value_found = True + + for datestr in date_value.split(','): + date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) + if TZID is not None: + if date.tzinfo is None: + date = date.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART/EXDATE specifies multiple timezone') + datevals.append(date) + + return datevals + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P<name>[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + exdatevals.extend( + self._parse_date_value(value, parms, + TZID_NAMES, ignoretz, + tzids, tzinfos) + ) + elif name == "DTSTART": + dtvals = self._parse_date_value(value, parms, TZID_NAMES, + ignoretz, tzids, tzinfos) + if len(dtvals) != 1: + raise ValueError("Multiple DTSTART values specified:" + + value) + dtstart = dtvals[0] + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + rset.exdate(value) + if
compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/tzwin.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/tzwin.py new file mode 100644 index 0000000000000000000000000000000000000000..cebc673e40fc376653ebf037e96f0a6d0b33e906 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2d245a0bebcd5fc37ac20526aabbd5358dab0e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Useful for comparing two datetimes that may have a negligible difference + to be considered equal. 
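+
+    For example, two datetimes one second apart are within a two-second
+    tolerance:
+
+    .. doctest::
+
+        >>> from datetime import datetime, timedelta
+        >>> within_delta(datetime(2020, 1, 1, 12, 0, 0),
+        ...              datetime(2020, 1, 1, 12, 0, 1),
+        ...              timedelta(seconds=2))
+        True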
+ """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..853e2309589437b3559abeb4409de1f94f39bfc9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/__init__.py @@ -0,0 +1,65 @@ +try: + from importlib.metadata import entry_points +except ImportError: # python < 3.8 + try: + from importlib_metadata import entry_points + except ImportError: + entry_points = None + + +from . import _version, caching +from .callbacks import Callback +from .compression import available_compressions +from .core import get_fs_token_paths, open, open_files, open_local +from .exceptions import FSTimeoutError +from .mapping import FSMap, get_mapper +from .registry import ( + available_protocols, + filesystem, + get_filesystem_class, + register_implementation, + registry, +) +from .spec import AbstractFileSystem + +__version__ = _version.get_versions()["version"] + +__all__ = [ + "AbstractFileSystem", + "FSTimeoutError", + "FSMap", + "filesystem", + "register_implementation", + "get_filesystem_class", + "get_fs_token_paths", + "get_mapper", + "open", + "open_files", + "open_local", + "registry", + "caching", + "Callback", + "available_protocols", + "available_compressions", +] + + +def process_entries(): + if entry_points is not None: + try: + eps = entry_points() + except TypeError: + pass # importlib-metadata < 0.8 + else: + if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0 + specs = eps.select(group="fsspec.specs") + else: + specs = eps.get("fsspec.specs", []) + for spec in specs: + err_msg = f"Unable to load filesystem from {spec}" + register_implementation( + spec.name, spec.value.replace(":", "."), errtxt=err_msg + ) + + +process_entries() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/_version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ed78077f0586e46a8126d6351bc671b7e7b2fb75 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/_version.py @@ -0,0 +1,21 @@ + +# This file was generated by 'versioneer.py' (0.20) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. 
+ +import json + +version_json = ''' +{ + "date": "2022-05-19T14:13:38-0400", + "dirty": false, + "error": null, + "full-revisionid": "148a6861481f824afb88c7c50955aa6ed4e25d32", + "version": "2022.5.0" +} +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/archive.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/archive.py new file mode 100644 index 0000000000000000000000000000000000000000..f83d040609fcbd52ffa84b95711b898f15bfb9ed --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/archive.py @@ -0,0 +1,68 @@ +from fsspec import AbstractFileSystem +from fsspec.utils import tokenize + + +class AbstractArchiveFileSystem(AbstractFileSystem): + """ + A generic superclass for implementing Archive-based filesystems. + + Currently, it is shared amongst `ZipFileSystem`, `LibArchiveFileSystem` and + `TarFileSystem`. + """ + + def __str__(self): + return "<Archive-like object %s at %s>" % (type(self).__name__, id(self)) + + __repr__ = __str__ + + def ukey(self, path): + return tokenize(path, self.fo, self.protocol) + + def _all_dirnames(self, paths): + """Returns *all* directory names for each path in paths, including intermediate ones. + + Parameters + ---------- + paths: Iterable of path strings + """ + if len(paths) == 0: + return set() + + dirnames = {self._parent(path) for path in paths} - {self.root_marker} + return dirnames | self._all_dirnames(dirnames) + + def info(self, path, **kwargs): + self._get_dirs() + path = self._strip_protocol(path) + if path in self.dir_cache: + return self.dir_cache[path] + elif path + "/" in self.dir_cache: + return self.dir_cache[path + "/"] + else: + raise FileNotFoundError(path) + + def ls(self, path, detail=False, **kwargs): + self._get_dirs() + paths = {} + for p, f in self.dir_cache.items(): + p = p.rstrip("/") + if "/" in p: + root = p.rsplit("/", 1)[0] + else: + root = "" + if root == path.rstrip("/"): + paths[p] = f + elif all( + (a == b) + for a, b in zip(path.split("/"), [""] + p.strip("/").split("/")) + ): + # root directory entry + ppath = p.rstrip("/").split("/", 1)[0] + if ppath not in paths: + out = {"name": ppath + "/", "size": 0, "type": "directory"} + paths[ppath] = out + out = list(paths.values()) + if detail: + return out + else: + return list(sorted(f["name"] for f in out)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/asyn.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/asyn.py new file mode 100644 index 0000000000000000000000000000000000000000..c2bc64d2852f258f2b525e2d8584c1d679d38902 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/asyn.py @@ -0,0 +1,933 @@ +import asyncio +import asyncio.events +import functools +import inspect +import io +import os +import re +import sys +import threading +from contextlib import contextmanager +from glob import has_magic + +from .callbacks import _DEFAULT_CALLBACK +from .exceptions import FSTimeoutError +from .spec import AbstractBufferedFile, AbstractFileSystem +from .utils import is_exception, other_paths + +private = re.compile("_[^_]") + + +async def _runner(event, coro, result, timeout=None): + timeout = timeout if timeout else None # convert 0 or 0.0 to None + if timeout is not None: + coro = asyncio.wait_for(coro, timeout=timeout) + try: + result[0] = await coro + except Exception as ex: + result[0] = ex +
finally: + event.set() + + +def sync(loop, func, *args, timeout=None, **kwargs): + """ + Make loop run coroutine until it returns. Runs in other thread + """ + timeout = timeout if timeout else None # convert 0 or 0.0 to None + # NB: if the loop is not running *yet*, it is OK to submit work + # and we will wait for it + if loop is None or loop.is_closed(): + raise RuntimeError("Loop is not running") + try: + loop0 = asyncio.events.get_running_loop() + if loop0 is loop: + raise NotImplementedError("Calling sync() from within a running loop") + except RuntimeError: + pass + coro = func(*args, **kwargs) + result = [None] + event = threading.Event() + asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop) + while True: + # this loops allows thread to get interrupted + if event.wait(1): + break + if timeout is not None: + timeout -= 1 + if timeout < 0: + raise FSTimeoutError + + return_result = result[0] + if isinstance(return_result, asyncio.TimeoutError): + # suppress asyncio.TimeoutError, raise FSTimeoutError + raise FSTimeoutError from return_result + elif isinstance(return_result, BaseException): + raise return_result + else: + return return_result + + +iothread = [None] # dedicated fsspec IO thread +loop = [None] # global event loop for any non-async instance +lock = threading.Lock() # for setting exactly one thread + + +def sync_wrapper(func, obj=None): + """Given a function, make so can be called in async or blocking contexts + + Leave obj=None if defining within a class. Pass the instance if attaching + as an attribute of the instance. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + self = obj or args[0] + return sync(self.loop, func, *args, **kwargs) + + return wrapper + + +@contextmanager +def _selector_policy(): + original_policy = asyncio.get_event_loop_policy() + try: + if ( + sys.version_info >= (3, 8) + and os.name == "nt" + and hasattr(asyncio, "WindowsSelectorEventLoopPolicy") + ): + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + yield + finally: + asyncio.set_event_loop_policy(original_policy) + + +def get_running_loop(): + if hasattr(asyncio, "get_running_loop"): + return asyncio.get_running_loop() + else: + loop = asyncio._get_running_loop() + if loop is None: + raise RuntimeError("no running event loop") + else: + return loop + + +def get_loop(): + """Create or return the default fsspec IO loop + + The loop will be running on a separate thread. + """ + if loop[0] is None: + with lock: + # repeat the check just in case the loop got filled between the + # previous two calls from another thread + if loop[0] is None: + with _selector_policy(): + loop[0] = asyncio.new_event_loop() + th = threading.Thread(target=loop[0].run_forever, name="fsspecIO") + th.daemon = True + th.start() + iothread[0] = th + return loop[0] + + +@contextmanager +def fsspec_loop(): + """Temporarily switch the current event loop to the fsspec's + own loop, and then revert it back after the context gets + terminated. 
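+
+    A usage sketch, assuming ``SomeAsyncFileSystem`` is an
+    ``AsyncFileSystem`` implementation defined elsewhere:
+
+        >>> with fsspec_loop():  # doctest: +SKIP
+        ...     fs = SomeAsyncFileSystem(asynchronous=True)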
+ """ + try: + original_loop = get_running_loop() + except RuntimeError: + original_loop = None + + fsspec_loop = get_loop() + try: + asyncio._set_running_loop(fsspec_loop) + yield fsspec_loop + finally: + asyncio._set_running_loop(original_loop) + + +try: + import resource +except ImportError: + resource = None + ResourceError = OSError +else: + ResourceEror = resource.error + +_DEFAULT_BATCH_SIZE = 128 +_NOFILES_DEFAULT_BATCH_SIZE = 1280 + + +def _get_batch_size(nofiles=False): + from fsspec.config import conf + + if nofiles: + if "nofiles_gather_batch_size" in conf: + return conf["nofiles_gather_batch_size"] + else: + if "gather_batch_size" in conf: + return conf["gather_batch_size"] + if nofiles: + return _NOFILES_DEFAULT_BATCH_SIZE + if resource is None: + return _DEFAULT_BATCH_SIZE + + try: + soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE) + except (ImportError, ValueError, ResourceError): + return _DEFAULT_BATCH_SIZE + + if soft_limit == resource.RLIM_INFINITY: + return -1 + else: + return soft_limit // 8 + + +async def _run_coros_in_chunks( + coros, + batch_size=None, + callback=_DEFAULT_CALLBACK, + timeout=None, + return_exceptions=False, + nofiles=False, +): + """Run the given coroutines in chunks. + + Parameters + ---------- + coros: list of coroutines to run + batch_size: int or None + Number of coroutines to submit/wait on simultaneously. + If -1, then it will not be any throttling. If + None, it will be inferred from _get_batch_size() + callback: fsspec.callbacks.Callback instance + Gets a relative_update when each coroutine completes + timeout: number or None + If given, each coroutine times out after this time. Note that, since + there are multiple batches, the total run time of this function will in + general be longer + return_exceptions: bool + Same meaning as in asyncio.gather + nofiles: bool + If inferring the batch_size, does this operation involve local files? + If yes, you normally expect smaller batches. + """ + + if batch_size is None: + batch_size = _get_batch_size(nofiles=nofiles) + + if batch_size == -1: + batch_size = len(coros) + + assert batch_size > 0 + results = [] + for start in range(0, len(coros), batch_size): + chunk = [ + asyncio.Task(asyncio.wait_for(c, timeout=timeout)) + for c in coros[start : start + batch_size] + ] + if callback is not _DEFAULT_CALLBACK: + [ + t.add_done_callback(lambda *_, **__: callback.relative_update(1)) + for t in chunk + ] + results.extend( + await asyncio.gather(*chunk, return_exceptions=return_exceptions), + ) + return results + + +# these methods should be implemented as async by any async-able backend +async_methods = [ + "_ls", + "_cat_file", + "_get_file", + "_put_file", + "_rm_file", + "_cp_file", + "_pipe_file", + "_expand_path", + "_info", + "_isfile", + "_isdir", + "_exists", + "_walk", + "_glob", + "_find", + "_du", + "_size", + "_mkdir", + "_makedirs", +] + + +class AsyncFileSystem(AbstractFileSystem): + """Async file operations, default implementations + + Passes bulk operations to asyncio.gather for concurrent operation. + + Implementations that have concurrent batch operations and/or async methods + should inherit from this class instead of AbstractFileSystem. Docstrings are + copied from the un-underscored method in AbstractFileSystem, if not given. + """ + + # note that methods do not have docstring here; they will be copied + # for _* methods and inferred for overridden methods. 
+ + async_impl = True + disable_throttling = False + + def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs): + self.asynchronous = asynchronous + self._pid = os.getpid() + if not asynchronous: + self._loop = loop or get_loop() + else: + self._loop = None + self.batch_size = batch_size + super().__init__(*args, **kwargs) + + @property + def loop(self): + if self._pid != os.getpid(): + raise RuntimeError("This class is not fork-safe") + return self._loop + + async def _rm_file(self, path, **kwargs): + raise NotImplementedError + + async def _rm(self, path, recursive=False, batch_size=None, **kwargs): + # TODO: implement on_error + batch_size = batch_size or self.batch_size + path = await self._expand_path(path, recursive=recursive) + return await _run_coros_in_chunks( + [self._rm_file(p, **kwargs) for p in path], + batch_size=batch_size, + nofiles=True, + ) + + async def _cp_file(self, path1, path2, **kwargs): + raise NotImplementedError + + async def _copy( + self, + path1, + path2, + recursive=False, + on_error=None, + maxdepth=None, + batch_size=None, + **kwargs, + ): + if on_error is None and recursive: + on_error = "ignore" + elif on_error is None: + on_error = "raise" + + paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive) + path2 = other_paths(paths, path2) + batch_size = batch_size or self.batch_size + coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)] + result = await _run_coros_in_chunks( + coros, batch_size=batch_size, return_exceptions=True, nofiles=True + ) + + for ex in filter(is_exception, result): + if on_error == "ignore" and isinstance(ex, FileNotFoundError): + continue + raise ex + + async def _pipe(self, path, value=None, batch_size=None, **kwargs): + if isinstance(path, str): + path = {path: value} + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._pipe_file(k, v, **kwargs) for k, v in path.items()], + batch_size=batch_size, + nofiles=True, + ) + + async def _process_limits(self, url, start, end): + """Helper for "Range"-based _cat_file""" + size = None + suff = False + if start is not None and start < 0: + # if start is negative and end None, end is the "suffix length" + if end is None: + end = -start + start = "" + suff = True + else: + size = size or (await self._info(url))["size"] + start = size + start + elif start is None: + start = 0 + if not suff: + if end is not None and end < 0: + if start is not None: + size = size or (await self._info(url))["size"] + end = size + end + elif end is None: + end = "" + if isinstance(end, int): + end -= 1 # bytes range is inclusive + return "bytes=%s-%s" % (start, end) + + async def _cat_file(self, path, start=None, end=None, **kwargs): + raise NotImplementedError + + async def _cat( + self, path, recursive=False, on_error="raise", batch_size=None, **kwargs + ): + paths = await self._expand_path(path, recursive=recursive) + coros = [self._cat_file(path, **kwargs) for path in paths] + batch_size = batch_size or self.batch_size + out = await _run_coros_in_chunks( + coros, batch_size=batch_size, nofiles=True, return_exceptions=True + ) + if on_error == "raise": + ex = next(filter(is_exception, out), False) + if ex: + raise ex + if ( + len(paths) > 1 + or isinstance(path, list) + or paths[0] != self._strip_protocol(path) + ): + return { + k: v + for k, v in zip(paths, out) + if on_error != "omit" or not is_exception(v) + } + else: + return out[0] + + async def _cat_ranges( + self, paths, starts, ends, max_gap=None, 
batch_size=None, **kwargs + ): + # TODO: on_error + if max_gap is not None: + # use utils.merge_offset_ranges + raise NotImplementedError + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, list): + starts = [starts] * len(paths) + if not isinstance(ends, list): + ends = [ends] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + coros = [ + self._cat_file(p, start=s, end=e, **kwargs) + for p, s, e in zip(paths, starts, ends) + ] + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks(coros, batch_size=batch_size, nofiles=True) + + async def _put_file(self, lpath, rpath, **kwargs): + raise NotImplementedError + + async def _put( + self, + lpath, + rpath, + recursive=False, + callback=_DEFAULT_CALLBACK, + batch_size=None, + **kwargs, + ): + """Copy file(s) from local. + + Copies a specific file or tree of files (if recursive=True). If rpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. + + The put_file method will be called concurrently on a batch of files. The + batch_size option can configure the number of futures that can be executed + at the same time. If it is -1, then all the files will be uploaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit. + """ + from .implementations.local import LocalFileSystem, make_path_posix + + rpath = self._strip_protocol(rpath) + if isinstance(lpath, str): + lpath = make_path_posix(lpath) + fs = LocalFileSystem() + lpaths = fs.expand_path(lpath, recursive=recursive) + rpaths = other_paths( + lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath) + ) + + is_dir = {l: os.path.isdir(l) for l in lpaths} + rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]] + file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]] + + await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs]) + batch_size = batch_size or self.batch_size + + coros = [] + callback.set_size(len(file_pairs)) + for lfile, rfile in file_pairs: + callback.branch(lfile, rfile, kwargs) + coros.append(self._put_file(lfile, rfile, **kwargs)) + + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _get_file(self, rpath, lpath, **kwargs): + raise NotImplementedError + + async def _get( + self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs + ): + """Copy file(s) to local. + + Copies a specific file or tree of files (if recursive=True). If lpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. Can submit a list of paths, which may be glob-patterns + and will be expanded. + + The get_file method will be called concurrently on a batch of files. The + batch_size option can configure the number of futures that can be executed + at the same time. If it is -1, then all the files will be downloaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
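+
+        A call sketch (paths are illustrative)::
+
+            await fs._get("bucket/data/", "local/data/", recursive=True)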
+ """ + from fsspec.implementations.local import make_path_posix + + rpath = self._strip_protocol(rpath) + lpath = make_path_posix(lpath) + rpaths = await self._expand_path(rpath, recursive=recursive) + lpaths = other_paths(rpaths, lpath) + [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths] + batch_size = kwargs.pop("batch_size", self.batch_size) + + coros = [] + callback.set_size(len(lpaths)) + for lpath, rpath in zip(lpaths, rpaths): + callback.branch(rpath, lpath, kwargs) + coros.append(self._get_file(rpath, lpath, **kwargs)) + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _isfile(self, path): + try: + return (await self._info(path))["type"] == "file" + except: # noqa: E722 + return False + + async def _isdir(self, path): + try: + return (await self._info(path))["type"] == "directory" + except IOError: + return False + + async def _size(self, path): + return (await self._info(path)).get("size", None) + + async def _sizes(self, paths, batch_size=None): + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._size(p) for p in paths], batch_size=batch_size + ) + + async def _exists(self, path): + try: + await self._info(path) + return True + except FileNotFoundError: + return False + + async def _info(self, path, **kwargs): + raise NotImplementedError + + async def _ls(self, path, detail=True, **kwargs): + raise NotImplementedError + + async def _walk(self, path, maxdepth=None, **kwargs): + path = self._strip_protocol(path) + full_dirs = {} + dirs = {} + files = {} + + detail = kwargs.pop("detail", False) + try: + listing = await self._ls(path, detail=True, **kwargs) + except (FileNotFoundError, IOError): + if detail: + yield path, {}, {} + else: + yield path, [], [] + return + + for info in listing: + # each info name must be at least [path]/part , but here + # we check also for names like [path]/part/ + pathname = info["name"].rstrip("/") + name = pathname.rsplit("/", 1)[-1] + if info["type"] == "directory" and pathname != path: + # do not include "self" path + full_dirs[pathname] = info + dirs[name] = info + elif pathname == path: + # file-like with same name as give path + files[""] = info + else: + files[name] = info + + if detail: + yield path, dirs, files + else: + yield path, list(dirs), list(files) + + if maxdepth is not None: + maxdepth -= 1 + if maxdepth < 1: + return + + for d in full_dirs: + async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs): + yield _ + + async def _glob(self, path, **kwargs): + import re + + ends = path.endswith("/") + path = self._strip_protocol(path) + indstar = path.find("*") if path.find("*") >= 0 else len(path) + indques = path.find("?") if path.find("?") >= 0 else len(path) + indbrace = path.find("[") if path.find("[") >= 0 else len(path) + + ind = min(indstar, indques, indbrace) + + detail = kwargs.pop("detail", False) + + if not has_magic(path): + root = path + depth = 1 + if ends: + path += "/*" + elif await self._exists(path): + if not detail: + return [path] + else: + return {path: await self._info(path)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:ind]: + ind2 = path[:ind].rindex("/") + root = path[: ind2 + 1] + depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1 + else: + root = "" + depth = None if "**" in path else path[ind + 1 :].count("/") + 1 + + allpaths = await self._find( + root, maxdepth=depth, withdirs=True, detail=True, 
**kwargs + ) + # Escape characters special to python regex, leaving our supported + # special characters in place. + # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html + # for shell globbing details. + pattern = ( + "^" + + ( + path.replace("\\", r"\\") + .replace(".", r"\.") + .replace("+", r"\+") + .replace("//", "/") + .replace("(", r"\(") + .replace(")", r"\)") + .replace("|", r"\|") + .replace("^", r"\^") + .replace("$", r"\$") + .replace("{", r"\{") + .replace("}", r"\}") + .rstrip("/") + .replace("?", ".") + ) + + "$" + ) + pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern) + pattern = re.sub("[*]", "[^/]*", pattern) + pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*")) + out = { + p: allpaths[p] + for p in sorted(allpaths) + if pattern.match(p.replace("//", "/").rstrip("/")) + } + if detail: + return out + else: + return list(out) + + async def _du(self, path, total=True, maxdepth=None, **kwargs): + sizes = {} + # async for? + for f in await self._find(path, maxdepth=maxdepth, **kwargs): + info = await self._info(f) + sizes[info["name"]] = info["size"] + if total: + return sum(sizes.values()) + else: + return sizes + + async def _find(self, path, maxdepth=None, withdirs=False, **kwargs): + path = self._strip_protocol(path) + out = dict() + detail = kwargs.pop("detail", False) + # async for? + async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs): + if withdirs: + files.update(dirs) + out.update({info["name"]: info for name, info in files.items()}) + if not out and (await self._isfile(path)): + # walk works on directories, but find should also return [path] + # when path happens to be a file + out[path] = {} + names = sorted(out) + if not detail: + return names + else: + return {name: out[name] for name in names} + + async def _expand_path(self, path, recursive=False, maxdepth=None): + if isinstance(path, str): + out = await self._expand_path([path], recursive, maxdepth) + else: + # reduce depth on each recursion level unless None or 0 + maxdepth = maxdepth if not maxdepth else maxdepth - 1 + out = set() + path = [self._strip_protocol(p) for p in path] + for p in path: # can gather here + if has_magic(p): + bit = set(await self._glob(p)) + out |= bit + if recursive: + out |= set( + await self._expand_path( + list(bit), recursive=recursive, maxdepth=maxdepth + ) + ) + continue + elif recursive: + rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True)) + out |= rec + if p not in out and (recursive is False or (await self._exists(p))): + # should only check once, for the root + out.add(p) + if not out: + raise FileNotFoundError(path) + return list(sorted(out)) + + async def _mkdir(self, path, create_parents=True, **kwargs): + pass # not necessary to implement, may not have directories + + async def _makedirs(self, path, exist_ok=False): + pass # not necessary to implement, may not have directories + + async def open_async(self, path, mode="rb", **kwargs): + if "b" not in mode or kwargs.get("compression"): + raise ValueError + raise NotImplementedError + + +def mirror_sync_methods(obj): + """Populate sync and async methods for obj + + For each method will create a sync version if the name refers to an async method + (coroutine) and there is no override in the child class; will create an async + method for the corresponding sync method if there is no implementation. 
+ + Uses the methods specified in + - async_methods: the set that an implementation is expected to provide + - default_async_methods: that can be derived from their sync version in + AbstractFileSystem + - AsyncFileSystem: async-specific default coroutines + """ + from fsspec import AbstractFileSystem + + for method in async_methods + dir(AsyncFileSystem): + if not method.startswith("_"): + continue + smethod = method[1:] + if private.match(method): + isco = inspect.iscoroutinefunction(getattr(obj, method, None)) + unsync = getattr(getattr(obj, smethod, False), "__func__", None) + is_default = unsync is getattr(AbstractFileSystem, smethod, "") + if isco and is_default: + mth = sync_wrapper(getattr(obj, method), obj=obj) + setattr(obj, smethod, mth) + if not mth.__doc__: + mth.__doc__ = getattr( + getattr(AbstractFileSystem, smethod, None), "__doc__", "" + ) + + +class FSSpecCoroutineCancel(Exception): + pass + + +def _dump_running_tasks( + printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False +): + import traceback + + tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()] + if printout: + [task.print_stack() for task in tasks] + out = [ + { + "locals": task._coro.cr_frame.f_locals, + "file": task._coro.cr_frame.f_code.co_filename, + "firstline": task._coro.cr_frame.f_code.co_firstlineno, + "linelo": task._coro.cr_frame.f_lineno, + "stack": traceback.format_stack(task._coro.cr_frame), + "task": task if with_task else None, + } + for task in tasks + ] + if cancel: + for t in tasks: + cbs = t._callbacks + t.cancel() + asyncio.futures.Future.set_exception(t, exc) + asyncio.futures.Future.cancel(t) + [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures + try: + t._coro.throw(exc) # exits coro, unless explicitly handled + except exc: + pass + return out + + +class AbstractAsyncStreamedFile(AbstractBufferedFile): + # no read buffering, and always auto-commit + # TODO: readahead might still be useful here, but needs async version + + async def read(self, length=-1): + """ + Return data from cache, or fetch pieces as necessary + + Parameters + ---------- + length: int (-1) + Number of bytes to read; if <0, all remaining bytes. + """ + length = -1 if length is None else int(length) + if self.mode != "rb": + raise ValueError("File not in read mode") + if length < 0: + length = self.size - self.loc + if self.closed: + raise ValueError("I/O operation on closed file.") + if length == 0: + # don't even bother calling fetch + return b"" + out = await self._fetch_range(self.loc, self.loc + length) + self.loc += len(out) + return out + + async def write(self, data): + """ + Write data to buffer. + + Buffer only sent on flush() or if buffer is greater than + or equal to blocksize. + + Parameters + ---------- + data: bytes + Set of bytes to be written. 
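+
+        A call sketch::
+
+            # data is buffered until blocksize is reached, then flushed
+            n = await f.write(b"chunk of bytes")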
+ """ + if self.mode not in {"wb", "ab"}: + raise ValueError("File not in write mode") + if self.closed: + raise ValueError("I/O operation on closed file.") + if self.forced: + raise ValueError("This file has been force-flushed, can only close") + out = self.buffer.write(data) + self.loc += out + if self.buffer.tell() >= self.blocksize: + await self.flush() + return out + + async def close(self): + """Close file + + Finalizes writes, discards cache + """ + if getattr(self, "_unclosable", False): + return + if self.closed: + return + if self.mode == "rb": + self.cache = None + else: + if not self.forced: + await self.flush(force=True) + + if self.fs is not None: + self.fs.invalidate_cache(self.path) + self.fs.invalidate_cache(self.fs._parent(self.path)) + + self.closed = True + + async def flush(self, force=False): + if self.closed: + raise ValueError("Flush on closed file") + if force and self.forced: + raise ValueError("Force flush cannot be called more than once") + if force: + self.forced = True + + if self.mode not in {"wb", "ab"}: + # no-op to flush on read-mode + return + + if not force and self.buffer.tell() < self.blocksize: + # Defer write on small block + return + + if self.offset is None: + # Initialize a multipart upload + self.offset = 0 + try: + await self._initiate_upload() + except: # noqa: E722 + self.closed = True + raise + + if self._upload_chunk(final=force) is not False: + self.offset += self.buffer.seek(0, 2) + self.buffer = io.BytesIO() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def _fetch_range(self, start, end): + raise NotImplementedError + + async def _initiate_upload(self): + raise NotImplementedError + + async def _upload_chunk(self, final=False): + raise NotImplementedError diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/caching.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/caching.py new file mode 100644 index 0000000000000000000000000000000000000000..421fe7aaed62299779cff9d3cdd89a68d8d929f9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/caching.py @@ -0,0 +1,521 @@ +import functools +import io +import logging +import math +import os +import warnings + +logger = logging.getLogger("fsspec") + + +class BaseCache(object): + """Pass-though cache: doesn't keep anything, calls every time + + Acts as base class for other cachers + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + """ + + name = "none" + + def __init__(self, blocksize, fetcher, size): + self.blocksize = blocksize + self.fetcher = fetcher + self.size = size + + def _fetch(self, start, stop): + if start is None: + start = 0 + if stop is None: + stop = self.size + if start >= self.size or start >= stop: + return b"" + return self.fetcher(start, stop) + + +class MMapCache(BaseCache): + """memory-mapped sparse file cache + + Opens temporary file, which is filled blocks-wise when data is requested. + Ensure there is enough disc space in the temporary location. 
+ + This cache method might only work on posix + """ + + name = "mmap" + + def __init__(self, blocksize, fetcher, size, location=None, blocks=None): + super().__init__(blocksize, fetcher, size) + self.blocks = set() if blocks is None else blocks + self.location = location + self.cache = self._makefile() + + def _makefile(self): + import mmap + import tempfile + + if self.size == 0: + return bytearray() + + # posix version + if self.location is None or not os.path.exists(self.location): + if self.location is None: + fd = tempfile.TemporaryFile() + self.blocks = set() + else: + fd = io.open(self.location, "wb+") + fd.seek(self.size - 1) + fd.write(b"1") + fd.flush() + else: + fd = io.open(self.location, "rb+") + + return mmap.mmap(fd.fileno(), self.size) + + def _fetch(self, start, end): + logger.debug(f"MMap cache fetching {start}-{end}") + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + start_block = start // self.blocksize + end_block = end // self.blocksize + need = [i for i in range(start_block, end_block + 1) if i not in self.blocks] + while need: + # TODO: not a for loop so we can consolidate blocks later to + # make fewer fetch calls; this could be parallel + i = need.pop(0) + sstart = i * self.blocksize + send = min(sstart + self.blocksize, self.size) + logger.debug(f"MMap get block #{i} ({sstart}-{send}") + self.cache[sstart:send] = self.fetcher(sstart, send) + self.blocks.add(i) + + return self.cache[start:end] + + def __getstate__(self): + state = self.__dict__.copy() + # Remove the unpicklable entries. + del state["cache"] + return state + + def __setstate__(self, state): + # Restore instance attributes + self.__dict__.update(state) + self.cache = self._makefile() + + +class ReadAheadCache(BaseCache): + """Cache which reads only when we get beyond a block of data + + This is a much simpler version of BytesCache, and does not attempt to + fill holes in the cache or keep fragments alive. It is best suited to + many small reads in a sequential order (e.g., reading lines from a file). + """ + + name = "readahead" + + def __init__(self, blocksize, fetcher, size): + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start = 0 + self.end = 0 + + def _fetch(self, start, end): + if start is None: + start = 0 + if end is None or end > self.size: + end = self.size + if start >= self.size or start >= end: + return b"" + l = end - start + if start >= self.start and end <= self.end: + # cache hit + return self.cache[start - self.start : end - self.start] + elif self.start <= start < self.end: + # partial hit + part = self.cache[start - self.start :] + l -= len(part) + start = self.end + else: + # miss + part = b"" + end = min(self.size, end + self.blocksize) + self.cache = self.fetcher(start, end) # new block replaces old + self.start = start + self.end = self.start + len(self.cache) + return part + self.cache[:l] + + +class FirstChunkCache(BaseCache): + """Caches the first block of a file only + + This may be useful for file types where the metadata is stored in the header, + but is randomly accessed. 
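+
+    For example, with ``blocksize=5`` and a warm cache, a read of bytes
+    3..12 takes bytes 3..5 from the cached first block and fetches only
+    bytes 5..12 from the remote.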
+ """ + + name = "first" + + def __init__(self, blocksize, fetcher, size): + super().__init__(blocksize, fetcher, size) + self.cache = None + + def _fetch(self, start, end): + start = start or 0 + end = end or self.size + if start < self.blocksize: + if self.cache is None: + if end > self.blocksize: + data = self.fetcher(0, end) + self.cache = data[: self.blocksize] + return data[start:] + self.cache = self.fetcher(0, self.blocksize) + part = self.cache[start:end] + if end > self.blocksize: + part += self.fetcher(self.blocksize, end) + return part + else: + return self.fetcher(start, end) + + +class BlockCache(BaseCache): + """ + Cache holding memory as a set of blocks. + + Requests are only ever made `blocksize` at a time, and are + stored in an LRU cache. The least recently accessed block is + discarded when more than `maxblocks` are stored. + + Parameters + ---------- + blocksize : int + The number of bytes to store in each block. + Requests are only ever made for `blocksize`, so this + should balance the overhead of making a request against + the granularity of the blocks. + fetcher : Callable + size : int + The total size of the file being cached. + maxblocks : int + The maximum number of blocks to cache for. The maximum memory + use for this cache is then ``blocksize * maxblocks``. + """ + + name = "blockcache" + + def __init__(self, blocksize, fetcher, size, maxblocks=32): + super().__init__(blocksize, fetcher, size) + self.nblocks = math.ceil(size / blocksize) + self.maxblocks = maxblocks + self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block) + + def __repr__(self): + return "".format( + self.blocksize, self.size, self.nblocks + ) + + def cache_info(self): + """ + The statistics on the block cache. + + Returns + ------- + NamedTuple + Returned directly from the LRU Cache used internally. + """ + return self._fetch_block_cached.cache_info() + + def __getstate__(self): + state = self.__dict__ + del state["_fetch_block_cached"] + return state + + def __setstate__(self, state): + self.__dict__.update(state) + self._fetch_block_cached = functools.lru_cache(state["maxblocks"])( + self._fetch_block + ) + + def _fetch(self, start, end): + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + + # byte position -> block numbers + start_block_number = start // self.blocksize + end_block_number = end // self.blocksize + + # these are cached, so safe to do multiple calls for the same start and end. + for block_number in range(start_block_number, end_block_number + 1): + self._fetch_block_cached(block_number) + + return self._read_cache( + start, + end, + start_block_number=start_block_number, + end_block_number=end_block_number, + ) + + def _fetch_block(self, block_number): + """ + Fetch the block of data for `block_number`. + """ + if block_number > self.nblocks: + raise ValueError( + "'block_number={}' is greater than the number of blocks ({})".format( + block_number, self.nblocks + ) + ) + + start = block_number * self.blocksize + end = start + self.blocksize + logger.info("BlockCache fetching block %d", block_number) + block_contents = super()._fetch(start, end) + return block_contents + + def _read_cache(self, start, end, start_block_number, end_block_number): + """ + Read from our block cache. + + Parameters + ---------- + start, end : int + The start and end byte positions. + start_block_number, end_block_number : int + The start and end block numbers. 
+ """ + start_pos = start % self.blocksize + end_pos = end % self.blocksize + + if start_block_number == end_block_number: + block = self._fetch_block_cached(start_block_number) + return block[start_pos:end_pos] + + else: + # read from the initial + out = [] + out.append(self._fetch_block_cached(start_block_number)[start_pos:]) + + # intermediate blocks + # Note: it'd be nice to combine these into one big request. However + # that doesn't play nicely with our LRU cache. + for block_number in range(start_block_number + 1, end_block_number): + out.append(self._fetch_block_cached(block_number)) + + # final block + out.append(self._fetch_block_cached(end_block_number)[:end_pos]) + + return b"".join(out) + + +class BytesCache(BaseCache): + """Cache which holds data in a in-memory bytes object + + Implements read-ahead by the block size, for semi-random reads progressing + through the file. + + Parameters + ---------- + trim: bool + As we read more data, whether to discard the start of the buffer when + we are more than a blocksize ahead of it. + """ + + name = "bytes" + + def __init__(self, blocksize, fetcher, size, trim=True): + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start = None + self.end = None + self.trim = trim + + def _fetch(self, start, end): + # TODO: only set start/end after fetch, in case it fails? + # is this where retry logic might go? + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + if ( + self.start is not None + and start >= self.start + and self.end is not None + and end < self.end + ): + # cache hit: we have all the required data + offset = start - self.start + return self.cache[offset : offset + end - start] + + if self.blocksize: + bend = min(self.size, end + self.blocksize) + else: + bend = end + + if bend == start or start > self.size: + return b"" + + if (self.start is None or start < self.start) and ( + self.end is None or end > self.end + ): + # First read, or extending both before and after + self.cache = self.fetcher(start, bend) + self.start = start + elif start < self.start: + if self.end - end > self.blocksize: + self.cache = self.fetcher(start, bend) + self.start = start + else: + new = self.fetcher(start, self.start) + self.start = start + self.cache = new + self.cache + elif bend > self.end: + if self.end > self.size: + pass + elif end - self.end > self.blocksize: + self.cache = self.fetcher(start, bend) + self.start = start + else: + new = self.fetcher(self.end, bend) + self.cache = self.cache + new + + self.end = self.start + len(self.cache) + offset = start - self.start + out = self.cache[offset : offset + end - start] + if self.trim: + num = (self.end - self.start) // (self.blocksize + 1) + if num > 1: + self.start += self.blocksize * num + self.cache = self.cache[self.blocksize * num :] + return out + + def __len__(self): + return len(self.cache) + + +class AllBytes(BaseCache): + """Cache entire contents of the file""" + + name = "all" + + def __init__(self, blocksize=None, fetcher=None, size=None, data=None): + super().__init__(blocksize, fetcher, size) + if data is None: + data = self.fetcher(0, self.size) + self.data = data + + def _fetch(self, start, end): + return self.data[start:end] + + +class KnownPartsOfAFile(BaseCache): + """ + Cache holding known file parts. 
+ + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + data: dict + A dictionary mapping explicit `(start, stop)` file-offset tuples + with known bytes. + strict: bool, default True + Whether to fetch reads that go beyond a known byte-range boundary. + If `False`, any read that ends outside a known part will be zero + padded. Note that zero padding will not be used for reads that + begin outside a known byte-range. + """ + + name = "parts" + + def __init__(self, blocksize, fetcher, size, data={}, strict=True, **_): + super(KnownPartsOfAFile, self).__init__(blocksize, fetcher, size) + self.strict = strict + + # simple consolidation of contiguous blocks + if data: + old_offsets = sorted(list(data.keys())) + offsets = [old_offsets[0]] + blocks = [data.pop(old_offsets[0])] + for start, stop in old_offsets[1:]: + start0, stop0 = offsets[-1] + if start == stop0: + offsets[-1] = (start0, stop) + blocks[-1] += data.pop((start, stop)) + else: + offsets.append((start, stop)) + blocks.append(data.pop((start, stop))) + + self.data = dict(zip(offsets, blocks)) + else: + self.data = data + + def _fetch(self, start, stop): + out = b"" + for (loc0, loc1), data in self.data.items(): + # If self.strict=False, use zero-padded data + # for reads beyond the end of a "known" buffer + if loc0 <= start < loc1: + off = start - loc0 + out = data[off : off + stop - start] + if not self.strict or loc0 <= stop <= loc1: + # The request is within a known range, or + # it begins within a known range, and we + # are allowed to pad reads beyond the + # buffer with zero + out += b"\x00" * (stop - start - len(out)) + return out + else: + # The request ends outside a known range, + # and we are being "strict" about reads + # beyond the buffer + start = loc1 + break + + # We only get here if there is a request outside the + # known parts of the file. In an ideal world, this + # should never happen + if self.fetcher is None: + # We cannot fetch the data, so raise an error + raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ") + # We can fetch the data, but should warn the user + # that this may be slow + warnings.warn( + f"Read is outside the known file parts: {(start, stop)}. " + f"IO/caching performance may be poor!" + ) + logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}") + return out + super()._fetch(start, stop) + + +caches = { + "none": BaseCache, + None: BaseCache, + "mmap": MMapCache, + "bytes": BytesCache, + "readahead": ReadAheadCache, + "block": BlockCache, + "first": FirstChunkCache, + "all": AllBytes, + "parts": KnownPartsOfAFile, +} diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/callbacks.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..8c80f519c830ae139ff3c350819f4fb6f2dd77ce --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/callbacks.py @@ -0,0 +1,238 @@ +class Callback: + """ + Base class and interface for callback mechanism + + This class can be used directly for monitoring file transfers by + providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument, + below), or subclassed for more specialised behaviour. 
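+
+    A small sketch of the hook mechanism (the hook name ``report`` is an
+    arbitrary choice, not an fsspec convention):
+
+    >>> def report(size, value, **kwargs):
+    ...     print(f"{value}/{size}")
+    >>> cb = Callback(size=3, hooks={"report": report})
+    >>> cb.relative_update()
+    1/3
+    >>> cb.relative_update(2)
+    3/3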
+ + Parameters + ---------- + size: int (optional) + Nominal quantity for the value that corresponds to a complete + transfer, e.g., total number of tiles or total number of + bytes + value: int (0) + Starting internal counter value + hooks: dict or None + A dict of named functions to be called on each update. The signature + of these must be ``f(size, value, **kwargs)`` + """ + + def __init__(self, size=None, value=0, hooks=None, **kwargs): + self.size = size + self.value = value + self.hooks = hooks or {} + self.kw = kwargs + + def set_size(self, size): + """ + Set the internal maximum size attribute + + Usually called if not initially set at instantiation. Note that this + triggers a ``call()``. + + Parameters + ---------- + size: int + """ + self.size = size + self.call() + + def absolute_update(self, value): + """ + Set the internal value state + + Triggers ``call()`` + + Parameters + ---------- + value: int + """ + self.value = value + self.call() + + def relative_update(self, inc=1): + """ + Delta increment the internal counter + + Triggers ``call()`` + + Parameters + ---------- + inc: int + """ + self.value += inc + self.call() + + def call(self, hook_name=None, **kwargs): + """ + Execute hook(s) with current state + + Each function is passed the internal size and current value + + Parameters + ---------- + hook_name: str or None + If given, execute on this hook + kwargs: passed on to (all) hook(s) + """ + if not self.hooks: + return + kw = self.kw.copy() + kw.update(kwargs) + if hook_name: + if hook_name not in self.hooks: + return + return self.hooks[hook_name](self.size, self.value, **kw) + for hook in self.hooks.values() or []: + hook(self.size, self.value, **kw) + + def wrap(self, iterable): + """ + Wrap an iterable to call ``relative_update`` on each iterations + + Parameters + ---------- + iterable: Iterable + The iterable that is being wrapped + """ + for item in iterable: + self.relative_update() + yield item + + def branch(self, path_1, path_2, kwargs): + """ + Set callbacks for child transfers + + If this callback is operating at a higher level, e.g., put, which may + trigger transfers that can also be monitored. The passed kwargs are + to be *mutated* to add ``callback=``, if this class supports branching + to children. + + Parameters + ---------- + path_1: str + Child's source path + path_2: str + Child's destination path + kwargs: dict + arguments passed to child method, e.g., put_file. + + Returns + ------- + + """ + return None + + def no_op(self, *_, **__): + pass + + def __getattr__(self, item): + """ + If undefined methods are called on this class, nothing happens + """ + return self.no_op + + @classmethod + def as_callback(cls, maybe_callback=None): + """Transform callback=... into Callback instance + + For the special value of ``None``, return the global instance of + ``NoOpCallback``. This is an alternative to including + ``callback=_DEFAULT_CALLBACK`` directly in a method signature. + """ + if maybe_callback is None: + return _DEFAULT_CALLBACK + return maybe_callback + + +class NoOpCallback(Callback): + """ + This implementation of Callback does exactly nothing + """ + + def call(self, *args, **kwargs): + return None + + +class DotPrinterCallback(Callback): + """ + Simple example Callback implementation + + Almost identical to Callback with a hook that prints a char; here we + demonstrate how the outer layer may print "#" and the inner layer "." 
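+
+    For instance (a sketch): ``DotPrinterCallback().relative_update()``
+    prints a single ``#``, while a child transfer created via ``branch``
+    prints ``.``.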
+ """ + + def __init__(self, chr_to_print="#", **kwargs): + self.chr = chr_to_print + super().__init__(**kwargs) + + def branch(self, path_1, path_2, kwargs): + """Mutate kwargs to add new instance with different print char""" + kwargs["callback"] = DotPrinterCallback(".") + + def call(self, **kwargs): + """Just outputs a character""" + print(self.chr, end="") + + +class TqdmCallback(Callback): + """ + A callback to display a progress bar using tqdm + + Parameters + ---------- + tqdm_kwargs : dict, (optional) + Any argument accepted by the tqdm constructor. + See the `tqdm doc `_. + Will be forwarded to tqdm. + + Examples + -------- + >>> import fsspec + >>> from fsspec.callbacks import TqdmCallback + >>> fs = fsspec.filesystem("memory") + >>> path2distant_data = "/your-path" + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(), + ) + + You can forward args to tqdm using the `tqdm_kwargs` parameter. + + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}), + ) + """ + + def __init__(self, tqdm_kwargs=None, *args, **kwargs): + try: + import tqdm + + self._tqdm = tqdm + except ImportError as exce: + raise ImportError( + "Using TqdmCallback requires tqdm to be installed" + ) from exce + + self._tqdm_kwargs = tqdm_kwargs or {} + super().__init__(*args, **kwargs) + + def set_size(self, size): + self.tqdm = self._tqdm.tqdm(total=size, **self._tqdm_kwargs) + + def relative_update(self, inc=1): + self.tqdm.update(inc) + + def __del__(self): + self.tqdm.close() + self.tqdm = None + + +_DEFAULT_CALLBACK = NoOpCallback() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/compression.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/compression.py new file mode 100644 index 0000000000000000000000000000000000000000..d7a13c9feba04209498abb243725875d9d481570 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/compression.py @@ -0,0 +1,173 @@ +"""Helper functions for a standard streaming compression API""" +from bz2 import BZ2File +from zipfile import ZipFile + +import fsspec.utils +from fsspec.spec import AbstractBufferedFile + + +def noop_file(file, mode, **kwargs): + return file + + +# TODO: files should also be available as contexts +# should be functions of the form func(infile, mode=, **kwargs) -> file-like +compr = {None: noop_file} + + +def register_compression(name, callback, extensions, force=False): + """Register an "inferable" file compression type. + + Registers transparent file compression type for use with fsspec.open. + Compression can be specified by name in open, or "infer"-ed for any files + ending with the given extensions. + + Args: + name: (str) The compression type name. Eg. "gzip". + callback: A callable of form (infile, mode, **kwargs) -> file-like. + Accepts an input file-like object, the target mode and kwargs. + Returns a wrapped file-like object. + extensions: (str, Iterable[str]) A file extension, or list of file + extensions for which to infer this compression scheme. Eg. "gz". + force: (bool) Force re-registration of compression type or extensions. + + Raises: + ValueError: If name or extensions already registered, and not force. 
+ + """ + if isinstance(extensions, str): + extensions = [extensions] + + # Validate registration + if name in compr and not force: + raise ValueError("Duplicate compression registration: %s" % name) + + for ext in extensions: + if ext in fsspec.utils.compressions and not force: + raise ValueError( + "Duplicate compression file extension: %s (%s)" % (ext, name) + ) + + compr[name] = callback + + for ext in extensions: + fsspec.utils.compressions[ext] = name + + +def unzip(infile, mode="rb", filename=None, **kwargs): + if "r" not in mode: + filename = filename or "file" + z = ZipFile(infile, mode="w", **kwargs) + fo = z.open(filename, mode="w") + fo.close = lambda closer=fo.close: closer() or z.close() + return fo + z = ZipFile(infile) + if filename is None: + filename = z.namelist()[0] + return z.open(filename, mode="r", **kwargs) + + +register_compression("zip", unzip, "zip") +register_compression("bz2", BZ2File, "bz2") + +try: # pragma: no cover + from isal import igzip + + # igzip is meant to be used as a faster drop in replacement to gzip + # so its api and functions are the same as the stdlib’s module. Except + # where ISA-L does not support the same calls as zlib + # (See https://python-isal.readthedocs.io/). + + register_compression("gzip", igzip.IGzipFile, "gz") +except ImportError: + from gzip import GzipFile + + register_compression( + "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz" + ) + +try: + from lzma import LZMAFile + + register_compression("lzma", LZMAFile, "xz") + register_compression("xz", LZMAFile, "xz", force=True) +except ImportError: + pass + +try: + import lzmaffi + + register_compression("lzma", lzmaffi.LZMAFile, "xz", force=True) + register_compression("xz", lzmaffi.LZMAFile, "xz", force=True) +except ImportError: + pass + + +class SnappyFile(AbstractBufferedFile): + def __init__(self, infile, mode, **kwargs): + import snappy + + super().__init__( + fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs + ) + self.infile = infile + if "r" in mode: + self.codec = snappy.StreamDecompressor() + else: + self.codec = snappy.StreamCompressor() + + def _upload_chunk(self, final=False): + self.buffer.seek(0) + out = self.codec.add_chunk(self.buffer.read()) + self.infile.write(out) + return True + + def seek(self, loc, whence=0): + raise NotImplementedError("SnappyFile is not seekable") + + def seekable(self): + return False + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + data = self.infile.read(end - start) + return self.codec.decompress(data) + + +try: + import snappy + + snappy.compress + # Snappy may use the .sz file extension, but this is not part of the + # standard implementation. 
+ register_compression("snappy", SnappyFile, []) + +except (ImportError, NameError): + pass + +try: + import lz4.frame + + register_compression("lz4", lz4.frame.open, "lz4") +except ImportError: + pass + +try: + import zstandard as zstd + + def zstandard_file(infile, mode="rb"): + if "r" in mode: + cctx = zstd.ZstdDecompressor() + return cctx.stream_reader(infile) + else: + cctx = zstd.ZstdCompressor(level=10) + return cctx.stream_writer(infile) + + register_compression("zstd", zstandard_file, "zst") +except ImportError: + pass + + +def available_compressions(): + """Return a list of the implemented compressions.""" + return list(compr) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/config.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/config.py new file mode 100644 index 0000000000000000000000000000000000000000..5db4fd78bf704c6b70a3e59ff3a7fa4bd6aa875d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/config.py @@ -0,0 +1,99 @@ +import configparser +import json +import os + +conf = {} +default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec") +conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir) + + +def set_conf_env(conf_dict, envdict=os.environ): + """Set config values from environment variables + + Looks for variable of the form ``FSSPEC__``. + There is no attempt to convert strings, but the kwarg keys will + be lower-cased. + + Parameters + ---------- + conf_dict : dict(str, dict) + This dict will be mutated + envdict : dict-like(str, str) + Source for the values - usually the real environment + """ + for key in envdict: + if key.startswith("FSSPEC"): + if key.count("_") < 2: + continue + _, proto, kwarg = key.split("_", 2) + conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key] + + +def set_conf_files(cdir, conf_dict): + """Set config values from files + + Scans for INI and JSON files in the given dictionary, and uses their + contents to set the config. In case of repeated values, later values + win. + + In the case of INI files, all values are strings, and these will not + be converted. 
+ + Parameters + ---------- + cdir : str + Directory to search + conf_dict : dict(str, dict) + This dict will be mutated + """ + if not os.path.isdir(cdir): + return + allfiles = sorted(os.listdir(cdir)) + for fn in allfiles: + if fn.endswith(".ini"): + ini = configparser.ConfigParser() + ini.read(os.path.join(cdir, fn)) + for key in ini: + if key == "DEFAULT": + continue + conf_dict.setdefault(key, {}).update(dict(ini[key])) + if fn.endswith(".json"): + with open(os.path.join(cdir, fn)) as f: + js = json.load(f) + for key in js: + conf_dict.setdefault(key, {}).update(dict(js[key])) + + +def apply_config(cls, kwargs, conf_dict=None): + """Supply default values for kwargs when instantiating class + + Augments the passed kwargs, by finding entries in the config dict + which match the classes ``.protocol`` attribute (one or more str) + + Parameters + ---------- + cls : file system implementation + kwargs : dict + conf_dict : dict of dict + Typically this is the global configuration + + Returns + ------- + dict : the modified set of kwargs + """ + if conf_dict is None: + conf_dict = conf + protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol] + kw = {} + for proto in protos: + # default kwargs from the current state of the config + if proto in conf_dict: + kw.update(conf_dict[proto]) + # explicit kwargs always win + kw.update(**kwargs) + kwargs = kw + return kwargs + + +set_conf_files(conf_dir, conf) +set_conf_env(conf) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/conftest.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6874a42c4895c3c7b973dc5d63fd4488a4e60b44 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/conftest.py @@ -0,0 +1,55 @@ +import os +import shutil +import subprocess +import sys +import time + +import pytest + +import fsspec +from fsspec.implementations.cached import CachingFileSystem + + +@pytest.fixture() +def m(): + """ + Fixture providing a memory filesystem. + """ + m = fsspec.filesystem("memory") + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + try: + yield m + finally: + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + + +@pytest.fixture +def ftp_writable(tmpdir): + """ + Fixture providing a writable FTP filesystem. 
+ """ + pytest.importorskip("pyftpdlib") + from fsspec.implementations.ftp import FTPFileSystem + + FTPFileSystem.clear_instance_cache() # remove lingering connections + CachingFileSystem.clear_instance_cache() + d = str(tmpdir) + with open(os.path.join(d, "out"), "wb") as f: + f.write(b"hello" * 10000) + P = subprocess.Popen( + [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"] + ) + try: + time.sleep(1) + yield "localhost", 2121, "user", "pass" + finally: + P.terminate() + P.wait() + try: + shutil.rmtree(tmpdir) + except Exception: + pass diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/core.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6fe462eda8ae4494bddf08407e56171c9ce8fd2a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/core.py @@ -0,0 +1,707 @@ +from __future__ import absolute_import, division, print_function + +import io +import logging +import os +import re +from glob import has_magic + +# for backwards compat, we export cache things from here too +from .caching import ( # noqa: F401 + BaseCache, + BlockCache, + BytesCache, + MMapCache, + ReadAheadCache, + caches, +) +from .compression import compr +from .registry import filesystem, get_filesystem_class +from .utils import ( + IOWrapper, + _unstrip_protocol, + build_name_function, + infer_compression, + stringify_path, + update_storage_options, +) + +logger = logging.getLogger("fsspec") + + +class OpenFile(object): + """ + File-like object to be used in a context + + Can layer (buffered) text-mode and compression over any file-system, which + are typically binary-only. + + These instances are safe to serialize, as the low-level file object + is not created until invoked using `with`. + + Parameters + ---------- + fs: FileSystem + The file system to use for opening the file. Should be a subclass or duck-type + with ``fsspec.spec.AbstractFileSystem`` + path: str + Location to open + mode: str like 'rb', optional + Mode of the opened file + compression: str or None, optional + Compression to apply + encoding: str or None, optional + The encoding to use if opened in text mode. + errors: str or None, optional + How to handle encoding errors if opened in text mode. + newline: None or str + Passed to TextIOWrapper in text mode, how to handle line endings. 
+ """ + + def __init__( + self, + fs, + path, + mode="rb", + compression=None, + encoding=None, + errors=None, + newline=None, + ): + self.fs = fs + self.path = path + self.mode = mode + self.compression = get_compression(path, compression) + self.encoding = encoding + self.errors = errors + self.newline = newline + self.fobjects = [] + + def __reduce__(self): + return ( + OpenFile, + ( + self.fs, + self.path, + self.mode, + self.compression, + self.encoding, + self.errors, + self.newline, + ), + ) + + def __repr__(self): + return "".format(self.path) + + def __fspath__(self): + # may raise if cannot be resolved to local file + return self.open().__fspath__() + + def __enter__(self): + mode = self.mode.replace("t", "").replace("b", "") + "b" + + f = self.fs.open(self.path, mode=mode) + + self.fobjects = [f] + + if self.compression is not None: + compress = compr[self.compression] + f = compress(f, mode=mode[0]) + self.fobjects.append(f) + + if "b" not in self.mode: + # assume, for example, that 'r' is equivalent to 'rt' as in builtin + f = io.TextIOWrapper( + f, encoding=self.encoding, errors=self.errors, newline=self.newline + ) + self.fobjects.append(f) + + return self.fobjects[-1] + + def __exit__(self, *args): + self.close() + + def __del__(self): + if hasattr(self, "fobjects"): + self.fobjects.clear() # may cause cleanup of objects and close files + + @property + def full_name(self): + return _unstrip_protocol(self.path, self.fs) + + def open(self): + """Materialise this as a real open file without context + + The file should be explicitly closed to avoid enclosed file + instances persisting. This code-path monkey-patches the file-like + objects, so they can close even if the parent OpenFile object has already + been deleted; but a with-context is better style. + """ + out = self.__enter__() + closer = out.close + fobjects = self.fobjects.copy()[:-1] + mode = self.mode + + def close(): + # this func has no reference to + closer() # original close bound method of the final file-like + _close(fobjects, mode) # call close on other dependent file-likes + + try: + out.close = close + except AttributeError: + out = IOWrapper(out, lambda: _close(fobjects, mode)) + + return out + + def close(self): + """Close all encapsulated file objects""" + _close(self.fobjects, self.mode) + + +class OpenFiles(list): + """List of OpenFile instances + + Can be used in a single context, which opens and closes all of the + contained files. Normal list access to get the elements works as + normal. + + A special case is made for caching filesystems - the files will + be down/uploaded together at the start or end of the context, and + this may happen concurrently, if the target filesystem supports it. 
+ """ + + def __init__(self, *args, mode="rb", fs=None): + self.mode = mode + self.fs = fs + self.files = [] + super().__init__(*args) + + def __enter__(self): + if self.fs is None: + raise ValueError("Context has already been used") + + fs = self.fs + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache download; or set up for upload + self.files = fs.open_many(self) + return self.files + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + return [s.__enter__() for s in self] + + def __exit__(self, *args): + fs = self.fs + if "r" not in self.mode: + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache upload + fs.commit_many(self.files) + self.files.clear() + return + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + [s.__exit__(*args) for s in self] + + def __getitem__(self, item): + out = super().__getitem__(item) + if isinstance(item, slice): + return OpenFiles(out, mode=self.mode, fs=self.fs) + return out + + def __repr__(self): + return "" % len(self) + + +def _close(fobjects, mode): + for f in reversed(fobjects): + if "r" not in mode and not f.closed: + f.flush() + f.close() + fobjects.clear() + + +def open_files( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + name_function=None, + num=1, + protocol=None, + newline=None, + auto_mkdir=True, + expand=True, + **kwargs, +): + """Given a path or paths, return a list of ``OpenFile`` objects. + + For writing, a str path must contain the "*" character, which will be filled + in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2. + + For either reading or writing, can instead provide explicit list of paths. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath(s). Prefix with a protocol like ``s3://`` + to read from alternative filesystems. To read from multiple files you + can pass a globstring or a list of paths, with the caveat that they + must all have the same protocol. + mode: 'rb', 'wt', etc. + compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + name_function: function or None + if opening a set of files for writing, those files do not yet exist, + so we need to generate their names by formatting the urlpath for + each sequence number + num: int [1] + if writing mode, number of files we expect to create (passed to + name+function) + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + auto_mkdir: bool (True) + If in write mode, this will ensure the target directory exists before + writing, by calling ``fs.mkdirs(exist_ok=True)``. + expand: bool + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Examples + -------- + >>> files = open_files('2015-*-*.csv') # doctest: +SKIP + >>> files = open_files( + ... 's3://bucket/2015-*-*.csv.gz', compression='gzip' + ... 
) # doctest: +SKIP + + Returns + ------- + An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can + be used as a single context + """ + fs, fs_token, paths = get_fs_token_paths( + urlpath, + mode, + num=num, + name_function=name_function, + storage_options=kwargs, + protocol=protocol, + expand=expand, + ) + if "r" not in mode and auto_mkdir: + parents = {fs._parent(path) for path in paths} + [fs.makedirs(parent, exist_ok=True) for parent in parents] + return OpenFiles( + [ + OpenFile( + fs, + path, + mode=mode, + compression=compression, + encoding=encoding, + errors=errors, + newline=newline, + ) + for path in paths + ], + mode=mode, + fs=fs, + ) + + +def _un_chain(path, kwargs): + if isinstance(path, (tuple, list)): + bits = [_un_chain(p, kwargs) for p in path] + out = [] + for pbit in zip(*bits): + paths, protocols, kwargs = zip(*pbit) + if len(set(protocols)) > 1: + raise ValueError("Protocol mismatch in URL chain") + if len(set(paths)) == 1: + paths = paths[0] + else: + paths = list(paths) + out.append([paths, protocols[0], kwargs[0]]) + return out + x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word + bits = ( + [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")] + if "::" in path + else [path] + ) + if len(bits) < 2: + return [] + # [[url, protocol, kwargs], ...] + out = [] + previous_bit = None + for bit in reversed(bits): + protocol = split_protocol(bit)[0] or "file" + cls = get_filesystem_class(protocol) + extra_kwargs = cls._get_kwargs_from_urls(bit) + kws = kwargs.get(protocol, {}) + kw = dict(**extra_kwargs, **kws) + bit = cls._strip_protocol(bit) + if ( + protocol in {"blockcache", "filecache", "simplecache"} + and "target_protocol" not in kw + ): + bit = previous_bit + out.append((bit, protocol, kw)) + previous_bit = bit + out = list(reversed(out)) + return out + + +def url_to_fs(url, **kwargs): + """ + Turn fully-qualified and potentially chained URL into filesystem instance + + Parameters + ---------- + url : str + The fsspec-compatible URL + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Returns + ------- + filesystem : FileSystem + The new filesystem discovered from ``url`` and created with + ``**kwargs``. + urlpath : str + The file-systems-specific URL for ``url``. + """ + chain = _un_chain(url, kwargs) + if len(chain) > 1: + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = protocol + inkwargs["fo"] = urls + urlpath, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + else: + protocol = split_protocol(url)[0] + cls = get_filesystem_class(protocol) + + options = cls._get_kwargs_from_urls(url) + update_storage_options(options, kwargs) + fs = cls(**options) + urlpath = fs._strip_protocol(url) + return fs, urlpath + + +def open( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + protocol=None, + newline=None, + **kwargs, +): + """Given a path or paths, return one ``OpenFile`` object. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath. Prefix with a protocol like ``s3://`` + to read from alternative filesystems. Should not include glob + character(s). + mode: 'rb', 'wt', etc. 
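+        (Note that, as with ``url_to_fs`` above, chained URLs are accepted
+        here too; an illustrative example is
+        ``fsspec.open("simplecache::s3://bucket/data.csv.gz", compression="infer")``,
+        which would cache the remote object locally and decompress it on read.)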
+ compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Examples + -------- + >>> openfile = open('2015-01-01.csv') # doctest: +SKIP + >>> openfile = open( + ... 's3://bucket/2015-01-01.csv.gz', compression='gzip' + ... ) # doctest: +SKIP + >>> with openfile as f: + ... df = pd.read_csv(f) # doctest: +SKIP + ... + + Returns + ------- + ``OpenFile`` object. + """ + return open_files( + urlpath=[urlpath], + mode=mode, + compression=compression, + encoding=encoding, + errors=errors, + protocol=protocol, + newline=newline, + expand=False, + **kwargs, + )[0] + + +def open_local(url, mode="rb", **storage_options): + """Open file(s) which can be resolved to local + + For files which either are local, or get downloaded upon open + (e.g., by file caching) + + Parameters + ---------- + url: str or list(str) + mode: str + Must be read mode + storage_options: + passed on to FS for or used by open_files (e.g., compression) + """ + if "r" not in mode: + raise ValueError("Can only ensure local files when reading") + of = open_files(url, mode=mode, **storage_options) + if not getattr(of[0].fs, "local_file", False): + raise ValueError( + "open_local can only be used on a filesystem which" + " has attribute local_file=True" + ) + with of as files: + paths = [f.name for f in files] + if isinstance(url, str) and not has_magic(url): + return paths[0] + return paths + + +def get_compression(urlpath, compression): + if compression == "infer": + compression = infer_compression(urlpath) + if compression is not None and compression not in compr: + raise ValueError("Compression type %s not supported" % compression) + return compression + + +def split_protocol(urlpath): + """Return protocol, path pair""" + urlpath = stringify_path(urlpath) + if "://" in urlpath: + protocol, path = urlpath.split("://", 1) + if len(protocol) > 1: + # excludes Windows paths + return protocol, path + return None, urlpath + + +def strip_protocol(urlpath): + """Return only path part of full URL, according to appropriate backend""" + protocol, _ = split_protocol(urlpath) + cls = get_filesystem_class(protocol) + return cls._strip_protocol(urlpath) + + +def expand_paths_if_needed(paths, mode, num, fs, name_function): + """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]`` + in them (read mode). + + :param paths: list of paths + mode: str + Mode in which to open files. + num: int + If opening in writing mode, number of files we expect to create. + fs: filesystem object + name_function: callable + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. + :return: list of paths + """ + expanded_paths = [] + paths = list(paths) + + if "w" in mode: # read mode + if sum([1 for p in paths if "*" in p]) > 1: + raise ValueError( + "When writing data, only one filename mask can be specified." 
+ ) + num = max(num, len(paths)) + + for curr_path in paths: + if "*" in curr_path: + # expand using name_function + expanded_paths.extend(_expand_paths(curr_path, name_function, num)) + else: + expanded_paths.append(curr_path) + # if we generated more paths that asked for, trim the list + if len(expanded_paths) > num: + expanded_paths = expanded_paths[:num] + + else: # read mode + for curr_path in paths: + if has_magic(curr_path): + # expand using glob + expanded_paths.extend(fs.glob(curr_path)) + else: + expanded_paths.append(curr_path) + + return expanded_paths + + +def get_fs_token_paths( + urlpath, + mode="rb", + num=1, + name_function=None, + storage_options=None, + protocol=None, + expand=True, +): + """Filesystem, deterministic token, and paths from a urlpath and options. + + Parameters + ---------- + urlpath: string or iterable + Absolute or relative filepath, URL (may include protocols like + ``s3://``), or globstring pointing to data. + mode: str, optional + Mode in which to open files. + num: int, optional + If opening in writing mode, number of files we expect to create. + name_function: callable, optional + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. + storage_options: dict, optional + Additional keywords to pass to the filesystem class. + protocol: str or None + To override the protocol specifier in the URL + expand: bool + Expand string paths for writing, assuming the path is a directory + """ + if isinstance(urlpath, (list, tuple, set)): + if not urlpath: + raise ValueError("empty urlpath sequence") + urlpath = [stringify_path(u) for u in urlpath] + else: + urlpath = stringify_path(urlpath) + chain = _un_chain(urlpath, storage_options or {}) + if len(chain) > 1: + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, nested_protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = nested_protocol + inkwargs["fo"] = urls + paths, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + if isinstance(paths, (list, tuple, set)): + paths = [fs._strip_protocol(u) for u in paths] + else: + paths = fs._strip_protocol(paths) + else: + if isinstance(urlpath, (list, tuple, set)): + protocols, paths = zip(*map(split_protocol, urlpath)) + if protocol is None: + protocol = protocols[0] + if not all(p == protocol for p in protocols): + raise ValueError( + "When specifying a list of paths, all paths must " + "share the same protocol" + ) + cls = get_filesystem_class(protocol) + optionss = list(map(cls._get_kwargs_from_urls, urlpath)) + paths = [cls._strip_protocol(u) for u in urlpath] + options = optionss[0] + if not all(o == options for o in optionss): + raise ValueError( + "When specifying a list of paths, all paths must " + "share the same file-system options" + ) + update_storage_options(options, storage_options) + fs = cls(**options) + else: + protocols = split_protocol(urlpath)[0] + protocol = protocol or protocols + cls = get_filesystem_class(protocol) + options = cls._get_kwargs_from_urls(urlpath) + paths = cls._strip_protocol(urlpath) + update_storage_options(options, storage_options) + fs = cls(**options) + + if isinstance(paths, (list, tuple, set)): + paths = expand_paths_if_needed(paths, mode, num, fs, name_function) + else: + if "w" in mode 
and expand: + paths = _expand_paths(paths, name_function, num) + elif "*" in paths: + paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)] + else: + paths = [paths] + + return fs, fs._fs_token, paths + + +def _expand_paths(path, name_function, num): + if isinstance(path, str): + if path.count("*") > 1: + raise ValueError("Output path spec must contain exactly one '*'.") + elif "*" not in path: + path = os.path.join(path, "*.part") + + if name_function is None: + name_function = build_name_function(num - 1) + + paths = [path.replace("*", name_function(i)) for i in range(num)] + if paths != sorted(paths): + logger.warning( + "In order to preserve order between partitions" + " paths created with ``name_function`` should " + "sort to partition order" + ) + elif isinstance(path, (tuple, list)): + assert len(path) == num + paths = list(path) + else: + raise ValueError( + "Path should be either\n" + "1. A list of paths: ['foo.json', 'bar.json', ...]\n" + "2. A directory: 'foo/\n" + "3. A path with a '*' in it: 'foo.*.json'" + ) + return paths diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/dircache.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/dircache.py new file mode 100644 index 0000000000000000000000000000000000000000..014e482e302a02b19d8588cd82d233711c5f3678 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/dircache.py @@ -0,0 +1,96 @@ +import time +from collections.abc import MutableMapping +from functools import lru_cache + + +class DirCache(MutableMapping): + """ + Caching of directory listings, in a structure like:: + + {"path0": [ + {"name": "path0/file0", + "size": 123, + "type": "file", + ... + }, + {"name": "path0/file1", + }, + ... + ], + "path1": [...] + } + + Parameters to this class control listing expiry or indeed turn + caching off + """ + + def __init__( + self, + use_listings_cache=True, + listings_expiry_time=None, + max_paths=None, + **kwargs, + ): + """ + + Parameters + ---------- + use_listings_cache: bool + If False, this cache never returns items, but always reports KeyError, + and setting items has no effect + listings_expiry_time: int or float (optional) + Time in seconds that a listing is considered valid. If None, + listings do not expire. + max_paths: int (optional) + The number of most recent listings that are considered valid; 'recent' + refers to when the entry was set. 
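+
+        A sketch of the expiry behaviour (the listing value is illustrative):
+
+        >>> cache = DirCache(listings_expiry_time=60)
+        >>> cache["/bucket"] = [{"name": "/bucket/key", "size": 1}]
+        >>> "/bucket" in cache  # True until 60 seconds have elapsed
+        True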
+ """ + self._cache = {} + self._times = {} + if max_paths: + self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None)) + self.use_listings_cache = use_listings_cache + self.listings_expiry_time = listings_expiry_time + self.max_paths = max_paths + + def __getitem__(self, item): + if self.listings_expiry_time is not None: + if self._times.get(item, 0) - time.time() < -self.listings_expiry_time: + del self._cache[item] + if self.max_paths: + self._q(item) + return self._cache[item] # maybe raises KeyError + + def clear(self): + self._cache.clear() + + def __len__(self): + return len(self._cache) + + def __contains__(self, item): + try: + self[item] + return True + except KeyError: + return False + + def __setitem__(self, key, value): + if not self.use_listings_cache: + return + if self.max_paths: + self._q(key) + self._cache[key] = value + if self.listings_expiry_time is not None: + self._times[key] = time.time() + + def __delitem__(self, key): + del self._cache[key] + + def __iter__(self): + return (k for k in self._cache if k in self) + + def __reduce__(self): + return ( + DirCache, + (self.use_listings_cache, self.listings_expiry_time, self.max_paths), + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/exceptions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6e1a44b6a1667d1c302869ff2a332634fda47e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/exceptions.py @@ -0,0 +1,21 @@ +""" +fsspec user-defined exception classes +""" +import asyncio + + +class BlocksizeMismatchError(ValueError): + """ + Raised when a cached file is opened with a different blocksize than it was + written with + """ + + ... + + +class FSTimeoutError(asyncio.TimeoutError): + """ + Raised when a fsspec function timed out occurs + """ + + ... 
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/fuse.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/fuse.py new file mode 100644 index 0000000000000000000000000000000000000000..b37d1d67de7b600ad41a26477122103124fbf368 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/fuse.py @@ -0,0 +1,324 @@ +import argparse +import logging +import os +import stat +import threading +import time +from errno import EIO, ENOENT + +from fuse import FUSE, FuseOSError, LoggingMixIn, Operations + +from fsspec import __version__ +from fsspec.core import url_to_fs + +logger = logging.getLogger("fsspec.fuse") + + +class FUSEr(Operations): + def __init__(self, fs, path, ready_file=False): + self.fs = fs + self.cache = {} + self.root = path.rstrip("/") + "/" + self.counter = 0 + logger.info("Starting FUSE at %s", path) + self._ready_file = ready_file + + def getattr(self, path, fh=None): + logger.debug("getattr %s", path) + if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]: + return {"type": "file", "st_size": 5} + + path = "".join([self.root, path.lstrip("/")]).rstrip("/") + try: + info = self.fs.info(path) + except FileNotFoundError: + raise FuseOSError(ENOENT) + + data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)} + perm = info.get("mode", 0o777) + + if info["type"] != "file": + data["st_mode"] = stat.S_IFDIR | perm + data["st_size"] = 0 + data["st_blksize"] = 0 + else: + data["st_mode"] = stat.S_IFREG | perm + data["st_size"] = info["size"] + data["st_blksize"] = 5 * 2**20 + data["st_nlink"] = 1 + data["st_atime"] = time.time() + data["st_ctime"] = time.time() + data["st_mtime"] = time.time() + return data + + def readdir(self, path, fh): + logger.debug("readdir %s", path) + path = "".join([self.root, path.lstrip("/")]) + files = self.fs.ls(path, False) + files = [os.path.basename(f.rstrip("/")) for f in files] + return [".", ".."] + files + + def mkdir(self, path, mode): + path = "".join([self.root, path.lstrip("/")]) + self.fs.mkdir(path) + return 0 + + def rmdir(self, path): + path = "".join([self.root, path.lstrip("/")]) + self.fs.rmdir(path) + return 0 + + def read(self, path, size, offset, fh): + logger.debug("read %s", (path, size, offset)) + if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]: + # status indicator + return b"ready" + + f = self.cache[fh] + f.seek(offset) + out = f.read(size) + return out + + def write(self, path, data, offset, fh): + logger.debug("write %s", (path, offset)) + f = self.cache[fh] + f.seek(offset) + f.write(data) + return len(data) + + def create(self, path, flags, fi=None): + logger.debug("create %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + self.fs.touch(fn) # OS will want to get attributes immediately + f = self.fs.open(fn, "wb") + self.cache[self.counter] = f + self.counter += 1 + return self.counter - 1 + + def open(self, path, flags): + logger.debug("open %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + if flags % 2 == 0: + # read + mode = "rb" + else: + # write/create + mode = "wb" + self.cache[self.counter] = self.fs.open(fn, mode) + self.counter += 1 + return self.counter - 1 + + def truncate(self, path, length, fh=None): + fn = "".join([self.root, path.lstrip("/")]) + if length != 0: + raise NotImplementedError + # maybe should be no-op since open with write sets size to zero anyway + self.fs.touch(fn) + + def unlink(self, path): + fn = "".join([self.root, 
path.lstrip("/")]) + try: + self.fs.rm(fn, False) + except (IOError, FileNotFoundError): + raise FuseOSError(EIO) + + def release(self, path, fh): + try: + if fh in self.cache: + f = self.cache[fh] + f.close() + self.cache.pop(fh) + except Exception as e: + print(e) + return 0 + + def chmod(self, path, mode): + if hasattr(self.fs, "chmod"): + path = "".join([self.root, path.lstrip("/")]) + return self.fs.chmod(path, mode) + raise NotImplementedError + + +def run( + fs, + path, + mount_point, + foreground=True, + threads=False, + ready_file=False, + ops_class=FUSEr, +): + """Mount stuff in a local directory + + This uses fusepy to make it appear as if a given path on an fsspec + instance is in fact resident within the local file-system. + + This requires that fusepy by installed, and that FUSE be available on + the system (typically requiring a package to be installed with + apt, yum, brew, etc.). + + Parameters + ---------- + fs: file-system instance + From one of the compatible implementations + path: str + Location on that file-system to regard as the root directory to + mount. Note that you typically should include the terminating "/" + character. + mount_point: str + An empty directory on the local file-system where the contents of + the remote path will appear. + foreground: bool + Whether or not calling this function will block. Operation will + typically be more stable if True. + threads: bool + Whether or not to create threads when responding to file operations + within the mounter directory. Operation will typically be more + stable if False. + ready_file: bool + Whether the FUSE process is ready. The `.fuse_ready` file will + exist in the `mount_point` directory if True. Debugging purpose. + ops_class: FUSEr or Subclass of FUSEr + To override the default behavior of FUSEr. For Example, logging + to file. + + """ + func = lambda: FUSE( + ops_class(fs, path, ready_file=ready_file), + mount_point, + nothreads=not threads, + foreground=foreground, + ) + if not foreground: + th = threading.Thread(target=func) + th.daemon = True + th.start() + return th + else: # pragma: no cover + try: + func() + except KeyboardInterrupt: + pass + + +def main(args): + """Mount filesystem from chained URL to MOUNT_POINT. 
+ + Examples: + + python3 -m fsspec.fuse memory /usr/share /tmp/mem + + python3 -m fsspec.fuse local /tmp/source /tmp/local \\ + -l /tmp/fsspecfuse.log + + You can also mount chained-URLs and use special settings: + + python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\ + / /tmp/zip \\ + -o 'filecache-cache_storage=/tmp/simplecache' + + You can specify the type of the setting by using `[int]` or `[bool]`, + (`true`, `yes`, `1` represents the Boolean value `True`): + + python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\ + /historic/packages/RPMS /tmp/ftp \\ + -o 'simplecache-cache_storage=/tmp/simplecache' \\ + -o 'simplecache-check_files=false[bool]' \\ + -o 'ftp-listings_expiry_time=60[int]' \\ + -o 'ftp-username=anonymous' \\ + -o 'ftp-password=xieyanbo' + """ + + class RawDescriptionArgumentParser(argparse.ArgumentParser): + def format_help(self): + usage = super(RawDescriptionArgumentParser, self).format_help() + parts = usage.split("\n\n") + parts[1] = self.description.rstrip() + return "\n\n".join(parts) + + parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__) + parser.add_argument("--version", action="version", version=__version__) + parser.add_argument("url", type=str, help="fs url") + parser.add_argument("source_path", type=str, help="source directory in fs") + parser.add_argument("mount_point", type=str, help="local directory") + parser.add_argument( + "-o", + "--option", + action="append", + help="Any options of protocol included in the chained URL", + ) + parser.add_argument( + "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')" + ) + parser.add_argument( + "-f", + "--foreground", + action="store_false", + help="Running in foreground or not (Default: False)", + ) + parser.add_argument( + "-t", + "--threads", + action="store_false", + help="Running with threads support (Default: False)", + ) + parser.add_argument( + "-r", + "--ready-file", + action="store_false", + help="The `.fuse_ready` file will exist after FUSE is ready. 
" + "(Debugging purpose, Default: False)", + ) + args = parser.parse_args(args) + + kwargs = {} + for item in args.option or []: + key, sep, value = item.partition("=") + if not sep: + parser.error(message="Wrong option: {!r}".format(item)) + val = value.lower() + if val.endswith("[int]"): + value = int(value[: -len("[int]")]) + elif val.endswith("[bool]"): + value = val[: -len("[bool]")] in ["1", "yes", "true"] + + if "-" in key: + fs_name, setting_name = key.split("-", 1) + if fs_name in kwargs: + kwargs[fs_name][setting_name] = value + else: + kwargs[fs_name] = {setting_name: value} + else: + kwargs[key] = value + + if args.log_file: + logging.basicConfig( + level=logging.DEBUG, + filename=args.log_file, + format="%(asctime)s %(message)s", + ) + + class LoggingFUSEr(FUSEr, LoggingMixIn): + pass + + fuser = LoggingFUSEr + else: + fuser = FUSEr + + fs, url_path = url_to_fs(args.url, **kwargs) + logger.debug("Mounting %s to %s", url_path, str(args.mount_point)) + run( + fs, + args.source_path, + args.mount_point, + foreground=args.foreground, + threads=args.threads, + ready_file=args.ready_file, + ops_class=fuser, + ) + + +if __name__ == "__main__": + import sys + + main(sys.argv[1:]) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/generic.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..a8de34e14ca62a6d01ca40f523890c3f749d150a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/generic.py @@ -0,0 +1,167 @@ +import inspect + +from .asyn import AsyncFileSystem +from .callbacks import _DEFAULT_CALLBACK +from .core import filesystem, get_filesystem_class, split_protocol + +_generic_fs = {} + + +def set_generic_fs(protocol, **storage_options): + _generic_fs[protocol] = filesystem(protocol, **storage_options) + + +default_method = "default" + + +def _resolve_fs(url, method=None, protocol=None, storage_options=None): + """Pick instance of backend FS""" + method = method or default_method + protocol = protocol or split_protocol(url)[0] + storage_options = storage_options or {} + if method == "default": + return filesystem(protocol) + if method == "generic": + return _generic_fs[protocol] + if method == "current": + cls = get_filesystem_class(protocol) + return cls.current() + if method == "options": + return filesystem(protocol, **storage_options.get(protocol, {})) + raise ValueError(f"Unknown FS resolution method: {method}") + + +class GenericFileSystem(AsyncFileSystem): + """Wrapper over all other FS types + + + + This implementation is a single unified interface to be able to run FS operations + over generic URLs, and dispatch to the specific implementations using the URL + protocol prefix. + + Note: instances of this FS are always async, even if you never use it with any async + backend. + """ + + protocol = "generic" # there is no real reason to ever use a protocol with this FS + + def __init__(self, default_method=None, **kwargs): + """ + + Parameters + ---------- + default_method: str (optional) + Defines how to configure backend FS instances. Options are: + - "default" (you get this with None): instantiate like FSClass(), with no + extra arguments; this is the default instance of that FS, and can be + configured via the config system + - "generic": takes instances from the `_generic_fs` dict in this module, + which you must populate before use. 
Keys are by protocol + - "current": takes the most recently instantiated version of each FS + - "options": expect ``storage_options`` to be passed along with every call. + """ + self.method = default_method + super(GenericFileSystem, self).__init__(**kwargs) + + async def _info( + self, url, method=None, protocol=None, storage_options=None, fs=None, **kwargs + ): + fs = fs or _resolve_fs(url, method or self.method, protocol, storage_options) + if fs.async_impl: + out = await fs._info(url, **kwargs) + else: + out = fs.info(url, **kwargs) + out["name"] = fs.unstrip_protocol(out["name"]) + return out + + async def _ls( + self, + url, + method=None, + protocol=None, + storage_options=None, + fs=None, + detail=True, + **kwargs, + ): + fs = fs or _resolve_fs(url, method or self.method, protocol, storage_options) + if fs.async_impl: + out = await fs._ls(url, detail=True, **kwargs) + else: + out = fs.ls(url, detail=True, **kwargs) + for o in out: + o["name"] = fs.unstrip_protocol(o["name"]) + if detail: + return out + else: + return [o["name"] for o in out] + + async def _rm( + self, url, method=None, protocol=None, storage_options=None, fs=None, **kwargs + ): + fs = fs or _resolve_fs(url, method or self.method, protocol, storage_options) + if fs.async_impl: + await fs._rm(url, **kwargs) + else: + fs.rm(url, **kwargs) + + async def _cp_file( + self, + url, + url2, + method=None, + protocol=None, + storage_options=None, + fs=None, + method2=None, + protocol2=None, + storage_options2=None, + fs2=None, + blocksize=2**20, + callback=_DEFAULT_CALLBACK, + **kwargs, + ): + fs = fs or _resolve_fs(url, method or self.method, protocol, storage_options) + fs2 = fs2 or _resolve_fs( + url2, method2 or self.method, protocol2, storage_options2 + ) + if fs is fs2: + # pure remote + if fs.async_impl: + return await fs._cp_file(url, url2, **kwargs) + else: + return fs.cp_file(url, url2, **kwargs) + kw = {"blocksize": 0, "cache_type": "none"} + try: + f1 = ( + await fs.open_async(url, "rb") + if hasattr(fs, "open_async") + else fs.open(url, "rb", **kw) + ) + callback.set_size(maybe_await(f1.size)) + f2 = ( + await fs2.open_async(url2, "wb") + if hasattr(fs2, "open_async") + else fs2.open(url2, "wb", **kw) + ) + while f1.size is None or f2.tell() < f1.size: + data = await maybe_await(f1.read(blocksize)) + if f1.size is None and not data: + break + await maybe_await(f2.write(data)) + callback.absolute_update(f2.tell()) + finally: + try: + await maybe_await(f2.close()) + await maybe_await(f1.close()) + except NameError: + # fail while opening f1 or f2 + pass + + +async def maybe_await(cor): + if inspect.iscoroutine(cor): + return await cor + else: + return cor diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/gui.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/gui.py new file mode 100644 index 0000000000000000000000000000000000000000..78eae1cf9cc798bdc982201a3a3e9e2978cf4a2b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/gui.py @@ -0,0 +1,408 @@ +import ast +import contextlib +import logging +import os +import re + +import panel as pn + +from .core import OpenFile, get_filesystem_class, split_protocol +from .registry import known_implementations + +pn.extension() +logger = logging.getLogger("fsspec.gui") + + +class SigSlot(object): + """Signal-slot mixin, for Panel event passing + + Include this class in a widget manager's superclasses to be able to + register events and callbacks on Panel widgets 
managed by that class. + + The method ``_register`` should be called as widgets are added, and external + code should call ``connect`` to associate callbacks. + + By default, all signals emit a DEBUG logging statement. + """ + + signals = [] # names of signals that this class may emit + # each of which must be set by _register for any new instance + slots = [] # names of actions that this class may respond to + + # each of which must be a method name + + def __init__(self): + self._ignoring_events = False + self._sigs = {} + self._map = {} + self._setup() + + def _setup(self): + """Create GUI elements and register signals""" + self.panel = pn.pane.PaneBase() + # no signals to set up in the base class + + def _register( + self, widget, name, thing="value", log_level=logging.DEBUG, auto=False + ): + """Watch the given attribute of a widget and assign it a named event + + This is normally called at the time a widget is instantiated, in the + class which owns it. + + Parameters + ---------- + widget : pn.layout.Panel or None + Widget to watch. If None, an anonymous signal not associated with + any widget. + name : str + Name of this event + thing : str + Attribute of the given widget to watch + log_level : int + When the signal is triggered, a logging event of the given level + will be fired in the dfviz logger. + auto : bool + If True, automatically connects with a method in this class of the + same name. + """ + if name not in self.signals: + raise ValueError("Attempt to assign an undeclared signal: %s" % name) + self._sigs[name] = { + "widget": widget, + "callbacks": [], + "thing": thing, + "log": log_level, + } + wn = "-".join( + [ + getattr(widget, "name", str(widget)) if widget is not None else "none", + thing, + ] + ) + self._map[wn] = name + if widget is not None: + widget.param.watch(self._signal, thing, onlychanged=True) + if auto and hasattr(self, name): + self.connect(name, getattr(self, name)) + + def _repr_mimebundle_(self, *args, **kwargs): + """Display in a notebook or a server""" + try: + return self.panel._repr_mimebundle_(*args, **kwargs) + except (ValueError, AttributeError): + raise NotImplementedError("Panel does not seem to be set " "up properly") + + def connect(self, signal, slot): + """Associate call back with given event + + The callback must be a function which takes the "new" value of the + watched attribute as the only parameter. If the callback return False, + this cancels any further processing of the given event. + + Alternatively, the callback can be a string, in which case it means + emitting the correspondingly-named event (i.e., connect to self) + """ + self._sigs[signal]["callbacks"].append(slot) + + def _signal(self, event): + """This is called by a an action on a widget + + Within an self.ignore_events context, nothing happens. + + Tests can execute this method by directly changing the values of + widget components. + """ + if not self._ignoring_events: + wn = "-".join([event.obj.name, event.name]) + if wn in self._map and self._map[wn] in self._sigs: + self._emit(self._map[wn], event.new) + + @contextlib.contextmanager + def ignore_events(self): + """Temporarily turn off events processing in this instance + + (does not propagate to children) + """ + self._ignoring_events = True + try: + yield + finally: + self._ignoring_events = False + + def _emit(self, sig, value=None): + """An event happened, call its callbacks + + This method can be used in tests to simulate message passing without + directly changing visual elements. 
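+
+        For example (illustrative; the actual signal names are declared
+        per subclass)::
+
+            >>> sigslot._emit("selected", ["file.txt"])  # doctest: +SKIP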
+ + Calling of callbacks will halt whenever one returns False. + """ + logger.log(self._sigs[sig]["log"], "{}: {}".format(sig, value)) + for callback in self._sigs[sig]["callbacks"]: + if isinstance(callback, str): + self._emit(callback) + else: + try: + # running callbacks should not break the interface + ret = callback(value) + if ret is False: + break + except Exception as e: + logger.exception( + "Exception (%s) while executing callback for signal: %s" + "" % (e, sig) + ) + + def show(self, threads=False): + """Open a new browser tab and display this instance's interface""" + self.panel.show(threads=threads, verbose=False) + return self + + +class SingleSelect(SigSlot): + """A multiselect which only allows you to select one item for an event""" + + signals = ["_selected", "selected"] # the first is internal + slots = ["set_options", "set_selection", "add", "clear", "select"] + + def __init__(self, **kwargs): + self.kwargs = kwargs + super().__init__() + + def _setup(self): + self.panel = pn.widgets.MultiSelect(**self.kwargs) + self._register(self.panel, "_selected", "value") + self._register(None, "selected") + self.connect("_selected", self.select_one) + + def _signal(self, *args, **kwargs): + super()._signal(*args, **kwargs) + + def select_one(self, *_): + with self.ignore_events(): + val = [self.panel.value[-1]] if self.panel.value else [] + self.panel.value = val + self._emit("selected", self.panel.value) + + def set_options(self, options): + self.panel.options = options + + def clear(self): + self.panel.options = [] + + @property + def value(self): + return self.panel.value + + def set_selection(self, selection): + self.panel.value = [selection] + + +class FileSelector(SigSlot): + """Panel-based graphical file selector widget + + Instances of this widget are interactive and can be displayed in jupyter by having + them as the output of a cell, or in a separate browser tab using ``.show()``. + """ + + signals = [ + "protocol_changed", + "selection_changed", + "directory_entered", + "home_clicked", + "up_clicked", + "go_clicked", + "filters_changed", + ] + slots = ["set_filters", "go_home"] + + def __init__(self, url=None, filters=None, ignore=None, kwargs=None): + """ + + Parameters + ---------- + url : str (optional) + Initial value of the URL to populate the dialog; should include protocol + filters : list(str) (optional) + File endings to include in the listings. If not included, all files are + allowed. Does not affect directories. + If given, the endings will appear as checkboxes in the interface + ignore : list(str) (optional) + Regex(s) of file basename patterns to ignore, e.g., "\\." 
for typical + hidden files on posix + kwargs : dict (optional) + To pass to file system instance + """ + if url: + self.init_protocol, url = split_protocol(url) + else: + self.init_protocol, url = "file", os.getcwd() + self.init_url = url + self.init_kwargs = kwargs or "{}" + self.filters = filters + self.ignore = [re.compile(i) for i in ignore or []] + self._fs = None + super().__init__() + + def _setup(self): + self.url = pn.widgets.TextInput( + name="url", + value=self.init_url, + align="end", + sizing_mode="stretch_width", + width_policy="max", + ) + self.protocol = pn.widgets.Select( + options=list(sorted(known_implementations)), + value=self.init_protocol, + name="protocol", + align="center", + ) + self.kwargs = pn.widgets.TextInput(name="kwargs", value="{}", align="center") + self.go = pn.widgets.Button(name="⇨", align="end", width=45) + self.main = SingleSelect(size=10) + self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end") + self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end") + + self._register(self.protocol, "protocol_changed", auto=True) + self._register(self.go, "go_clicked", "clicks", auto=True) + self._register(self.up, "up_clicked", "clicks", auto=True) + self._register(self.home, "home_clicked", "clicks", auto=True) + self._register(None, "selection_changed") + self.main.connect("selected", self.selection_changed) + self._register(None, "directory_entered") + self.prev_protocol = self.protocol.value + self.prev_kwargs = self.storage_options + + self.filter_sel = pn.widgets.CheckBoxGroup( + value=[], options=[], inline=False, align="end", width_policy="min" + ) + self._register(self.filter_sel, "filters_changed", auto=True) + + self.panel = pn.Column( + pn.Row(self.protocol, self.kwargs), + pn.Row(self.home, self.up, self.url, self.go, self.filter_sel), + self.main.panel, + ) + self.set_filters(self.filters) + self.go_clicked() + + def set_filters(self, filters=None): + self.filters = filters + if filters: + self.filter_sel.options = filters + self.filter_sel.value = filters + else: + self.filter_sel.options = [] + self.filter_sel.value = [] + + @property + def storage_options(self): + """Value of the kwargs box as a dictionary""" + return ast.literal_eval(self.kwargs.value) or {} + + @property + def fs(self): + """Current filesystem instance""" + if self._fs is None: + cls = get_filesystem_class(self.protocol.value) + self._fs = cls(**self.storage_options) + return self._fs + + @property + def urlpath(self): + """URL of currently selected item""" + return ( + (self.protocol.value + "://" + self.main.value[0]) + if self.main.value + else None + ) + + def open_file(self, mode="rb", compression=None, encoding=None): + """Create OpenFile instance for the currently selected item + + For example, in a notebook you might do something like + + .. code-block:: + + [ ]: sel = FileSelector(); sel + + # user selects their file + + [ ]: with sel.open_file('rb') as f: + ... out = f.read() + + Parameters + ---------- + mode: str (optional) + Open mode for the file. + compression: str (optional) + The interact with the file as compressed. Set to 'infer' to guess + compression from the file ending + encoding: str (optional) + If using text mode, use this encoding; defaults to UTF8. 
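+
+        Returns
+        -------
+        ``OpenFile`` instance, to be used as a context manager (as in the
+        example above).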
+ """ + if self.urlpath is None: + raise ValueError("No file selected") + return OpenFile(self.fs, self.urlpath, mode, compression, encoding) + + def filters_changed(self, values): + self.filters = values + self.go_clicked() + + def selection_changed(self, *_): + if self.urlpath is None: + return + if self.fs.isdir(self.urlpath): + self.url.value = self.fs._strip_protocol(self.urlpath) + self.go_clicked() + + def go_clicked(self, *_): + if ( + self.prev_protocol != self.protocol.value + or self.prev_kwargs != self.storage_options + ): + self._fs = None # causes fs to be recreated + self.prev_protocol = self.protocol.value + self.prev_kwargs = self.storage_options + listing = sorted( + self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"] + ) + listing = [ + l + for l in listing + if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore) + ] + folders = { + "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "directory" + } + files = { + "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "file" + } + if self.filters: + files = { + k: v + for k, v in files.items() + if any(v.endswith(ext) for ext in self.filters) + } + self.main.set_options(dict(**folders, **files)) + + def protocol_changed(self, *_): + self._fs = None + self.main.options = [] + self.url.value = "" + + def home_clicked(self, *_): + self.protocol.value = self.init_protocol + self.kwargs.value = self.init_kwargs + self.url.value = self.init_url + self.go_clicked() + + def up_clicked(self, *_): + self.url.value = self.fs._parent(self.url.value) + self.go_clicked() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/mapping.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..100388f1d1a2e67973df4e53df096adb042a140a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/mapping.py @@ -0,0 +1,232 @@ +import array +from collections.abc import MutableMapping + +from .core import url_to_fs + + +class FSMap(MutableMapping): + """Wrap a FileSystem instance as a mutable wrapping. + + The keys of the mapping become files under the given root, and the + values (which must be bytes) the contents of those files. + + Parameters + ---------- + root: string + prefix for all the files + fs: FileSystem instance + check: bool (=True) + performs a touch at the location, to check for write access. + + Examples + -------- + >>> fs = FileSystem(**parameters) # doctest: +SKIP + >>> d = FSMap('my-data/path/', fs) # doctest: +SKIP + or, more likely + >>> d = fs.get_mapper('my-data/path/') + + >>> d['loc1'] = b'Hello World' # doctest: +SKIP + >>> list(d.keys()) # doctest: +SKIP + ['loc1'] + >>> d['loc1'] # doctest: +SKIP + b'Hello World' + """ + + def __init__(self, root, fs, check=False, create=False, missing_exceptions=None): + self.fs = fs + self.root = fs._strip_protocol(root).rstrip( + "/" + ) # we join on '/' in _key_to_str + if missing_exceptions is None: + missing_exceptions = ( + FileNotFoundError, + IsADirectoryError, + NotADirectoryError, + ) + self.missing_exceptions = missing_exceptions + self.check = check + self.create = create + if create: + if not self.fs.exists(root): + self.fs.mkdir(root) + if check: + if not self.fs.exists(root): + raise ValueError( + "Path %s does not exist. 
Create " + " with the ``create=True`` keyword" % root + ) + self.fs.touch(root + "/a") + self.fs.rm(root + "/a") + + def clear(self): + """Remove all keys below root - empties out mapping""" + try: + self.fs.rm(self.root, True) + self.fs.mkdir(self.root) + except: # noqa: E722 + pass + + def getitems(self, keys, on_error="raise"): + """Fetch multiple items from the store + + If the backend is async-able, this might proceed concurrently + + Parameters + ---------- + keys: list(str) + They keys to be fetched + on_error : "raise", "omit", "return" + If raise, an underlying exception will be raised (converted to KeyError + if the type is in self.missing_exceptions); if omit, keys with exception + will simply not be included in the output; if "return", all keys are + included in the output, but the value will be bytes or an exception + instance. + + Returns + ------- + dict(key, bytes|exception) + """ + keys2 = [self._key_to_str(k) for k in keys] + oe = on_error if on_error == "raise" else "return" + try: + out = self.fs.cat(keys2, on_error=oe) + if isinstance(out, bytes): + out = {keys2[0]: out} + except self.missing_exceptions as e: + raise KeyError from e + out = { + k: (KeyError() if isinstance(v, self.missing_exceptions) else v) + for k, v in out.items() + } + return { + key: out[k2] + for key, k2 in zip(keys, keys2) + if on_error == "return" or not isinstance(out[k2], BaseException) + } + + def setitems(self, values_dict): + """Set the values of multiple items in the store + + Parameters + ---------- + values_dict: dict(str, bytes) + """ + values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()} + self.fs.pipe(values) + + def delitems(self, keys): + """Remove multiple keys from the store""" + self.fs.rm([self._key_to_str(k) for k in keys]) + + def _key_to_str(self, key): + """Generate full path for the key""" + if isinstance(key, (tuple, list)): + key = str(tuple(key)) + else: + key = str(key) + return self.fs._strip_protocol("/".join([self.root, key]) if self.root else key) + + def _str_to_key(self, s): + """Strip path of to leave key name""" + return s[len(self.root) :].lstrip("/") + + def __getitem__(self, key, default=None): + """Retrieve data""" + k = self._key_to_str(key) + try: + result = self.fs.cat(k) + except self.missing_exceptions: + if default is not None: + return default + raise KeyError(key) + return result + + def pop(self, key, default=None): + result = self.__getitem__(key, default) + try: + del self[key] + except KeyError: + pass + return result + + def __setitem__(self, key, value): + """Store value in key""" + key = self._key_to_str(key) + self.fs.mkdirs(self.fs._parent(key), exist_ok=True) + self.fs.pipe_file(key, maybe_convert(value)) + + def __iter__(self): + return (self._str_to_key(x) for x in self.fs.find(self.root)) + + def __len__(self): + return len(self.fs.find(self.root)) + + def __delitem__(self, key): + """Remove key""" + try: + self.fs.rm(self._key_to_str(key)) + except: # noqa: E722 + raise KeyError + + def __contains__(self, key): + """Does key exist in mapping?""" + path = self._key_to_str(key) + return self.fs.exists(path) and self.fs.isfile(path) + + def __reduce__(self): + return FSMap, (self.root, self.fs, False, False, self.missing_exceptions) + + +def maybe_convert(value): + if isinstance(value, array.array) or hasattr(value, "__array__"): + # bytes-like things + if hasattr(value, "dtype") and value.dtype.kind in "Mm": + # The buffer interface doesn't support datetime64/timdelta64 numpy + # arrays + value = 
value.view("int64") + value = bytearray(memoryview(value)) + return value + + +def get_mapper( + url="", + check=False, + create=False, + missing_exceptions=None, + alternate_root=None, + **kwargs, +): + """Create key-value interface for given URL and options + + The URL will be of the form "protocol://location" and point to the root + of the mapper required. All keys will be file-names below this location, + and their values the contents of each key. + + Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``. + + Parameters + ---------- + url: str + Root URL of mapping + check: bool + Whether to attempt to read from the location before instantiation, to + check that the mapping does exist + create: bool + Whether to make the directory corresponding to the root before + instantiating + missing_exceptions: None or tuple + If given, these exception types will be regarded as missing keys and + return KeyError when trying to read data. By default, you get + (FileNotFoundError, IsADirectoryError, NotADirectoryError) + alternate_root: None or str + In cases of complex URLs, the parser may fail to pick the correct part + for the mapper root, so this arg can override + + Returns + ------- + ``FSMap`` instance, the dict-like key-value store. + """ + # Removing protocol here - could defer to each open() on the backend + fs, urlpath = url_to_fs(url, **kwargs) + root = alternate_root if alternate_root is not None else urlpath + return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/parquet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/parquet.py new file mode 100644 index 0000000000000000000000000000000000000000..af55f8cf48e80ed81ba9abc3bff51915a5daf84c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/parquet.py @@ -0,0 +1,551 @@ +import io +import json +import warnings + +from .core import url_to_fs +from .utils import merge_offset_ranges + +# Parquet-Specific Utilities for fsspec +# +# Most of the functions defined in this module are NOT +# intended for public consumption. The only exception +# to this is `open_parquet_file`, which should be used +# place of `fs.open()` to open parquet-formatted files +# on remote file systems. + + +def open_parquet_file( + path, + mode="rb", + fs=None, + metadata=None, + columns=None, + row_groups=None, + storage_options=None, + strict=False, + engine="auto", + max_gap=64_000, + max_block=256_000_000, + footer_sample_size=1_000_000, + **kwargs, +): + """ + Return a file-like object for a single Parquet file. + + The specified parquet `engine` will be used to parse the + footer metadata, and determine the required byte ranges + from the file. The target path will then be opened with + the "parts" (`KnownPartsOfAFile`) caching strategy. + + Note that this method is intended for usage with remote + file systems, and is unlikely to improve parquet-read + performance on local file systems. + + Parameters + ---------- + path: str + Target file path. + mode: str, optional + Mode option to be passed through to `fs.open`. Default is "rb". + metadata: Any, optional + Parquet metadata object. Object type must be supported + by the backend parquet engine. For now, only the "fastparquet" + engine supports an explicit `ParquetFile` metadata object. + If a metadata object is supplied, the remote footer metadata + will not need to be transferred into local memory. 
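+        (This saves one remote read of the footer.)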
+ fs: AbstractFileSystem, optional + Filesystem object to use for opening the file. If nothing is + specified, an `AbstractFileSystem` object will be inferred. + engine : str, default "auto" + Parquet engine to use for metadata parsing. Allowed options + include "fastparquet", "pyarrow", and "auto". The specified + engine must be installed in the current environment. If + "auto" is specified, and both engines are installed, + "fastparquet" will take precedence over "pyarrow". + columns: list, optional + List of all column names that may be read from the file. + row_groups : list, optional + List of all row-groups that may be read from the file. This + may be a list of row-group indices (integers), or it may be + a list of `RowGroup` metadata objects (if the "fastparquet" + engine is used). + storage_options : dict, optional + Used to generate an `AbstractFileSystem` object if `fs` was + not specified. + strict : bool, optional + Whether the resulting `KnownPartsOfAFile` cache should + fetch reads that go beyond a known byte-range boundary. + If `False` (the default), any read that ends outside a + known part will be zero padded. Note that using + `strict=True` may be useful for debugging. + max_gap : int, optional + Neighboring byte ranges will only be merged when their + inter-range gap is <= `max_gap`. Default is 64KB. + max_block : int, optional + Neighboring byte ranges will only be merged when the size of + the aggregated range is <= `max_block`. Default is 256MB. + footer_sample_size : int, optional + Number of bytes to read from the end of the path to look + for the footer metadata. If the sampled bytes do not contain + the footer, a second read request will be required, and + performance will suffer. Default is 1MB. + **kwargs : + Optional key-word arguments to pass to `fs.open` + """ + + # Make sure we have an `AbstractFileSystem` object + # to work with + if fs is None: + fs = url_to_fs(path, **(storage_options or {}))[0] + + # For now, `columns == []` not supported. Just use + # default `open` command with `path` input + if columns is not None and len(columns) == 0: + return fs.open(path, mode=mode) + + # Set the engine + engine = _set_engine(engine) + + # Fetch the known byte ranges needed to read + # `columns` and/or `row_groups` + data = _get_parquet_byte_ranges( + [path], + fs, + metadata=metadata, + columns=columns, + row_groups=row_groups, + engine=engine, + max_gap=max_gap, + max_block=max_block, + footer_sample_size=footer_sample_size, + ) + + # Extract file name from `data` + fn = next(iter(data)) if data else path + + # Call self.open with "parts" caching + options = kwargs.pop("cache_options", {}).copy() + return fs.open( + fn, + mode=mode, + cache_type="parts", + cache_options={ + **options, + **{ + "data": data.get(fn, {}), + "strict": strict, + }, + }, + **kwargs, + ) + + +def _get_parquet_byte_ranges( + paths, + fs, + metadata=None, + columns=None, + row_groups=None, + max_gap=64_000, + max_block=256_000_000, + footer_sample_size=1_000_000, + engine="auto", +): + """Get a dictionary of the known byte ranges needed + to read a specific column/row-group selection from a + Parquet dataset. Each value in the output dictionary + is intended for use as the `data` argument for the + `KnownPartsOfAFile` caching strategy of a single path. 
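+
+    Each entry of the output has the form ``{path: {(start, stop): bytes}}``,
+    e.g. (illustrative values) ``{"s3://bucket/f.parquet": {(0, 4): b"PAR1"}}``.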
+ """ + + # Set engine if necessary + if isinstance(engine, str): + engine = _set_engine(engine) + + # Pass to specialized function if metadata is defined + if metadata is not None: + + # Use the provided parquet metadata object + # to avoid transferring/parsing footer metadata + return _get_parquet_byte_ranges_from_metadata( + metadata, + fs, + engine, + columns=columns, + row_groups=row_groups, + max_gap=max_gap, + max_block=max_block, + ) + + # Get file sizes asynchronously + file_sizes = fs.sizes(paths) + + # Populate global paths, starts, & ends + result = {} + data_paths = [] + data_starts = [] + data_ends = [] + add_header_magic = True + if columns is None and row_groups is None: + # We are NOT selecting specific columns or row-groups. + # + # We can avoid sampling the footers, and just transfer + # all file data with cat_ranges + for i, path in enumerate(paths): + result[path] = {} + for b in range(0, file_sizes[i], max_block): + data_paths.append(path) + data_starts.append(b) + data_ends.append(min(b + max_block, file_sizes[i])) + add_header_magic = False # "Magic" should already be included + else: + # We ARE selecting specific columns or row-groups. + # + # Gather file footers. + # We just take the last `footer_sample_size` bytes of each + # file (or the entire file if it is smaller than that) + footer_starts = [] + footer_ends = [] + for i, path in enumerate(paths): + footer_ends.append(file_sizes[i]) + sample_size = max(0, file_sizes[i] - footer_sample_size) + footer_starts.append(sample_size) + footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends) + + # Check our footer samples and re-sample if necessary. + missing_footer_starts = footer_starts.copy() + large_footer = 0 + for i, path in enumerate(paths): + footer_size = int.from_bytes(footer_samples[i][-8:-4], "little") + real_footer_start = file_sizes[i] - (footer_size + 8) + if real_footer_start < footer_starts[i]: + missing_footer_starts[i] = real_footer_start + large_footer = max(large_footer, (footer_size + 8)) + if large_footer: + warnings.warn( + f"Not enough data was used to sample the parquet footer. " + f"Try setting footer_sample_size >= {large_footer}." + ) + for i, block in enumerate( + fs.cat_ranges( + paths, + missing_footer_starts, + footer_starts, + ) + ): + footer_samples[i] = block + footer_samples[i] + footer_starts[i] = missing_footer_starts[i] + + # Calculate required byte ranges for each path + for i, path in enumerate(paths): + + # Deal with small-file case. + # Just include all remaining bytes of the file + # in a single range. 
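+            # (i.e. bytes 0 through the start of the footer sample that
+            # was already fetched above)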
+ if file_sizes[i] < max_block: + if footer_starts[i] > 0: + # Only need to transfer the data if the + # footer sample isn't already the whole file + data_paths.append(path) + data_starts.append(0) + data_ends.append(footer_starts[i]) + continue + + # Use "engine" to collect data byte ranges + path_data_starts, path_data_ends = engine._parquet_byte_ranges( + columns, + row_groups=row_groups, + footer=footer_samples[i], + footer_start=footer_starts[i], + ) + + data_paths += [path] * len(path_data_starts) + data_starts += path_data_starts + data_ends += path_data_ends + + # Merge adjacent offset ranges + data_paths, data_starts, data_ends = merge_offset_ranges( + data_paths, + data_starts, + data_ends, + max_gap=max_gap, + max_block=max_block, + sort=False, # Should already be sorted + ) + + # Start by populating `result` with footer samples + for i, path in enumerate(paths): + result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]} + + # Transfer the data byte-ranges into local memory + _transfer_ranges(fs, result, data_paths, data_starts, data_ends) + + # Add b"PAR1" to header if necessary + if add_header_magic: + _add_header_magic(result) + + return result + + +def _get_parquet_byte_ranges_from_metadata( + metadata, + fs, + engine, + columns=None, + row_groups=None, + max_gap=64_000, + max_block=256_000_000, +): + """Simplified version of `_get_parquet_byte_ranges` for + the case that an engine-specific `metadata` object is + provided, and the remote footer metadata does not need to + be transferred before calculating the required byte ranges. + """ + + # Use "engine" to collect data byte ranges + data_paths, data_starts, data_ends = engine._parquet_byte_ranges( + columns, + row_groups=row_groups, + metadata=metadata, + ) + + # Merge adjacent offset ranges + data_paths, data_starts, data_ends = merge_offset_ranges( + data_paths, + data_starts, + data_ends, + max_gap=max_gap, + max_block=max_block, + sort=False, # Should be sorted + ) + + # Transfer the data byte-ranges into local memory + result = {fn: {} for fn in list(set(data_paths))} + _transfer_ranges(fs, result, data_paths, data_starts, data_ends) + + # Add b"PAR1" to header + _add_header_magic(result) + + return result + + +def _transfer_ranges(fs, blocks, paths, starts, ends): + # Use cat_ranges to gather the data byte_ranges + ranges = (paths, starts, ends) + for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)): + blocks[path][(start, stop)] = data + + +def _add_header_magic(data): + # Add b"PAR1" to file headers + for i, path in enumerate(list(data.keys())): + add_magic = True + for k in data[path].keys(): + if k[0] == 0 and k[1] >= 4: + add_magic = False + break + if add_magic: + data[path][(0, 4)] = b"PAR1" + + +def _set_engine(engine_str): + + # Define a list of parquet engines to try + if engine_str == "auto": + try_engines = ("fastparquet", "pyarrow") + elif not isinstance(engine_str, str): + raise ValueError( + "Failed to set parquet engine! 
" + "Please pass 'fastparquet', 'pyarrow', or 'auto'" + ) + elif engine_str not in ("fastparquet", "pyarrow"): + raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`") + else: + try_engines = [engine_str] + + # Try importing the engines in `try_engines`, + # and choose the first one that succeeds + for engine in try_engines: + try: + if engine == "fastparquet": + return FastparquetEngine() + elif engine == "pyarrow": + return PyarrowEngine() + except ImportError: + pass + + # Raise an error if a supported parquet engine + # was not found + raise ImportError( + f"The following parquet engines are not installed " + f"in your python environment: {try_engines}." + f"Please install 'fastparquert' or 'pyarrow' to " + f"utilize the `fsspec.parquet` module." + ) + + +class FastparquetEngine: + + # The purpose of the FastparquetEngine class is + # to check if fastparquet can be imported (on initialization) + # and to define a `_parquet_byte_ranges` method. In the + # future, this class may also be used to define other + # methods/logic that are specific to fastparquet. + + def __init__(self): + import fastparquet as fp + + self.fp = fp + + def _row_group_filename(self, row_group, pf): + return pf.row_group_filename(row_group) + + def _parquet_byte_ranges( + self, + columns, + row_groups=None, + metadata=None, + footer=None, + footer_start=None, + ): + + # Initialize offset ranges and define ParqetFile metadata + pf = metadata + data_paths, data_starts, data_ends = [], [], [] + if pf is None: + pf = self.fp.ParquetFile(io.BytesIO(footer)) + + # Convert columns to a set and add any index columns + # specified in the pandas metadata (just in case) + column_set = None if columns is None else set(columns) + if column_set is not None and hasattr(pf, "pandas_metadata"): + md_index = [ + ind + for ind in pf.pandas_metadata.get("index_columns", []) + # Ignore RangeIndex information + if not isinstance(ind, dict) + ] + column_set |= set(md_index) + + # Check if row_groups is a list of integers + # or a list of row-group metadata + if row_groups and not isinstance(row_groups[0], int): + # Input row_groups contains row-group metadata + row_group_indices = None + else: + # Input row_groups contains row-group indices + row_group_indices = row_groups + row_groups = pf.row_groups + + # Loop through column chunks to add required byte ranges + for r, row_group in enumerate(row_groups): + # Skip this row-group if we are targeting + # specific row-groups + if row_group_indices is None or r in row_group_indices: + + # Find the target parquet-file path for `row_group` + fn = self._row_group_filename(row_group, pf) + + for column in row_group.columns: + name = column.meta_data.path_in_schema[0] + # Skip this column if we are targeting a + # specific columns + if column_set is None or name in column_set: + file_offset0 = column.meta_data.dictionary_page_offset + if file_offset0 is None: + file_offset0 = column.meta_data.data_page_offset + num_bytes = column.meta_data.total_compressed_size + if footer_start is None or file_offset0 < footer_start: + data_paths.append(fn) + data_starts.append(file_offset0) + data_ends.append( + min( + file_offset0 + num_bytes, + footer_start or (file_offset0 + num_bytes), + ) + ) + + if metadata: + # The metadata in this call may map to multiple + # file paths. 
Need to include `data_paths` + return data_paths, data_starts, data_ends + return data_starts, data_ends + + +class PyarrowEngine: + + # The purpose of the PyarrowEngine class is + # to check if pyarrow can be imported (on initialization) + # and to define a `_parquet_byte_ranges` method. In the + # future, this class may also be used to define other + # methods/logic that are specific to pyarrow. + + def __init__(self): + import pyarrow.parquet as pq + + self.pq = pq + + def _row_group_filename(self, row_group, metadata): + raise NotImplementedError + + def _parquet_byte_ranges( + self, + columns, + row_groups=None, + metadata=None, + footer=None, + footer_start=None, + ): + + if metadata is not None: + raise ValueError("metadata input not supported for PyarrowEngine") + + data_starts, data_ends = [], [] + md = self.pq.ParquetFile(io.BytesIO(footer)).metadata + + # Convert columns to a set and add any index columns + # specified in the pandas metadata (just in case) + column_set = None if columns is None else set(columns) + if column_set is not None: + schema = md.schema.to_arrow_schema() + has_pandas_metadata = ( + schema.metadata is not None and b"pandas" in schema.metadata + ) + if has_pandas_metadata: + md_index = [ + ind + for ind in json.loads( + schema.metadata[b"pandas"].decode("utf8") + ).get("index_columns", []) + # Ignore RangeIndex information + if not isinstance(ind, dict) + ] + column_set |= set(md_index) + + # Loop through column chunks to add required byte ranges + for r in range(md.num_row_groups): + # Skip this row-group if we are targeting + # specific row-groups + if row_groups is None or r in row_groups: + row_group = md.row_group(r) + for c in range(row_group.num_columns): + column = row_group.column(c) + name = column.path_in_schema + # Skip this column if we are targeting a + # specific columns + split_name = name.split(".")[0] + if ( + column_set is None + or name in column_set + or split_name in column_set + ): + file_offset0 = column.dictionary_page_offset + if file_offset0 is None: + file_offset0 = column.data_page_offset + num_bytes = column.total_compressed_size + if file_offset0 < footer_start: + data_starts.append(file_offset0) + data_ends.append( + min(file_offset0 + num_bytes, footer_start) + ) + return data_starts, data_ends diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/registry.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..148241f986372e062646c32a1bff0d8b964950d2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/registry.py @@ -0,0 +1,270 @@ +import importlib + +__all__ = ["registry", "get_filesystem_class", "default"] + +# mapping protocol: implementation class object +_registry = {} # internal, mutable + + +class ReadOnlyError(TypeError): + pass + + +class ReadOnlyRegistry(dict): + """Dict-like registry, but immutable + + Maps backend name to implementation class + + To add backend implementations, use ``register_implementation`` + """ + + def __init__(self, target): + self.target = target + + def __getitem__(self, item): + return self.target[item] + + def __delitem__(self, key): + raise ReadOnlyError + + def __setitem__(self, key, value): + raise ReadOnlyError + + def clear(self): + raise ReadOnlyError + + def __contains__(self, item): + return item in self.target + + def __iter__(self): + yield from self.target + + +def 
register_implementation(name, cls, clobber=True, errtxt=None): + """Add implementation class to the registry + + Parameters + ---------- + name: str + Protocol name to associate with the class + cls: class or str + if a class: fsspec-compliant implementation class (normally inherits from + ``fsspec.AbstractFileSystem``, gets added straight to the registry. If a + str, the full path to an implementation class like package.module.class, + which gets added to known_implementations, + so the import is deferred until the filesystem is actually used. + clobber: bool (optional) + Whether to overwrite a protocol with the same name; if False, will raise + instead. + errtxt: str (optional) + If given, then a failure to import the given class will result in this + text being given. + """ + if isinstance(cls, str): + if name in known_implementations and clobber is False: + raise ValueError( + "Name (%s) already in the known_implementations and clobber " + "is False" % name + ) + known_implementations[name] = { + "class": cls, + "err": errtxt or "%s import failed for protocol %s" % (cls, name), + } + + else: + if name in registry and clobber is False: + raise ValueError( + "Name (%s) already in the registry and clobber is False" % name + ) + _registry[name] = cls + + +registry = ReadOnlyRegistry(_registry) +default = "file" + +# protocols mapped to the class which implements them. This dict can +# updated with register_implementation +known_implementations = { + "file": {"class": "fsspec.implementations.local.LocalFileSystem"}, + "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"}, + "dropbox": { + "class": "dropboxdrivefs.DropboxDriveFileSystem", + "err": ( + 'DropboxFileSystem requires "dropboxdrivefs",' + '"requests" and "dropbox" to be installed' + ), + }, + "http": { + "class": "fsspec.implementations.http.HTTPFileSystem", + "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', + }, + "https": { + "class": "fsspec.implementations.http.HTTPFileSystem", + "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed', + }, + "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"}, + "tar": {"class": "fsspec.implementations.tar.TarFileSystem"}, + "gcs": { + "class": "gcsfs.GCSFileSystem", + "err": "Please install gcsfs to access Google Storage", + }, + "gs": { + "class": "gcsfs.GCSFileSystem", + "err": "Please install gcsfs to access Google Storage", + }, + "gdrive": { + "class": "gdrivefs.GoogleDriveFileSystem", + "err": "Please install gdrivefs for access to Google Drive", + }, + "sftp": { + "class": "fsspec.implementations.sftp.SFTPFileSystem", + "err": 'SFTPFileSystem requires "paramiko" to be installed', + }, + "ssh": { + "class": "fsspec.implementations.sftp.SFTPFileSystem", + "err": 'SFTPFileSystem requires "paramiko" to be installed', + }, + "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"}, + "hdfs": { + "class": "fsspec.implementations.hdfs.PyArrowHDFS", + "err": "pyarrow and local java libraries required for HDFS", + }, + "arrow_hdfs": { + "class": "fsspec.implementations.arrow.HadoopFileSystem", + "err": "pyarrow and local java libraries required for HDFS", + }, + "webhdfs": { + "class": "fsspec.implementations.webhdfs.WebHDFS", + "err": 'webHDFS access requires "requests" to be installed', + }, + "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, + "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"}, + "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access 
wandb"}, + "oci": { + "class": "ocifs.OCIFileSystem", + "err": "Install ocifs to access OCI Object Storage", + }, + "adl": { + "class": "adlfs.AzureDatalakeFileSystem", + "err": "Install adlfs to access Azure Datalake Gen1", + }, + "abfs": { + "class": "adlfs.AzureBlobFileSystem", + "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage", + }, + "az": { + "class": "adlfs.AzureBlobFileSystem", + "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage", + }, + "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"}, + "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"}, + "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"}, + "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"}, + "dask": { + "class": "fsspec.implementations.dask.DaskWorkerFileSystem", + "err": "Install dask distributed to access worker file system", + }, + "dbfs": { + "class": "fsspec.implementations.dbfs.DatabricksFileSystem", + "err": "Install the requests package to use the DatabricksFileSystem", + }, + "github": { + "class": "fsspec.implementations.github.GithubFileSystem", + "err": "Install the requests package to use the github FS", + }, + "git": { + "class": "fsspec.implementations.git.GitFileSystem", + "err": "Install pygit2 to browse local git repos", + }, + "smb": { + "class": "fsspec.implementations.smb.SMBFileSystem", + "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed', + }, + "jupyter": { + "class": "fsspec.implementations.jupyter.JupyterFileSystem", + "err": "Jupyter FS requires requests to be installed", + }, + "jlab": { + "class": "fsspec.implementations.jupyter.JupyterFileSystem", + "err": "Jupyter FS requires requests to be installed", + }, + "libarchive": { + "class": "fsspec.implementations.libarchive.LibArchiveFileSystem", + "err": "LibArchive requires to be installed", + }, + "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"}, + "generic": {"class": "fsspec.generic.GenericFileSystem"}, + "oss": { + "class": "ossfs.OSSFileSystem", + "err": "Install ossfs to access Alibaba Object Storage System", + }, + "webdav": { + "class": "webdav4.fsspec.WebdavFileSystem", + "err": "Install webdav4 to access WebDAV", + }, +} + + +def get_filesystem_class(protocol): + """Fetch named protocol implementation from the registry + + The dict ``known_implementations`` maps protocol names to the locations + of classes implementing the corresponding file-system. When used for the + first time, appropriate imports will happen and the class will be placed in + the registry. All subsequent calls will fetch directly from the registry. + + Some protocol implementations require additional dependencies, and so the + import may fail. In this case, the string in the "err" field of the + ``known_implementations`` will be given as the error message. 
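+
+    For example::
+
+        >>> get_filesystem_class("memory")  # doctest: +SKIP
+        <class 'fsspec.implementations.memory.MemoryFileSystem'>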
+ """ + if not protocol: + protocol = default + + if protocol not in registry: + if protocol not in known_implementations: + raise ValueError("Protocol not known: %s" % protocol) + bit = known_implementations[protocol] + try: + register_implementation(protocol, _import_class(bit["class"])) + except ImportError as e: + raise ImportError(bit["err"]) from e + cls = registry[protocol] + if getattr(cls, "protocol", None) in ("abstract", None): + cls.protocol = protocol + + return cls + + +def _import_class(cls, minv=None): + """Take a string FQP and return the imported class or identifier + + clas is of the form "package.module.klass" or "package.module:subobject.klass" + """ + if ":" in cls: + mod, name = cls.rsplit(":", 1) + mod = importlib.import_module(mod) + for part in name.split("."): + mod = getattr(mod, part) + return mod + else: + mod, name = cls.rsplit(".", 1) + mod = importlib.import_module(mod) + return getattr(mod, name) + + +def filesystem(protocol, **storage_options): + """Instantiate filesystems for given protocol and arguments + + ``storage_options`` are specific to the protocol being chosen, and are + passed directly to the class. + """ + cls = get_filesystem_class(protocol) + return cls(**storage_options) + + +def available_protocols(): + """Return a list of the implemented protocols. + + Note that any given protocol may require extra packages to be importable. + """ + return list(known_implementations) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/spec.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/spec.py new file mode 100644 index 0000000000000000000000000000000000000000..3743a825378eeba064daba697338156e646ff920 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/spec.py @@ -0,0 +1,1697 @@ +import io +import logging +import os +import threading +import warnings +import weakref +from errno import ESPIPE +from glob import has_magic +from hashlib import sha256 + +from .callbacks import _DEFAULT_CALLBACK +from .config import apply_config, conf +from .dircache import DirCache +from .transaction import Transaction +from .utils import ( + _unstrip_protocol, + isfilelike, + other_paths, + read_block, + stringify_path, + tokenize, +) + +logger = logging.getLogger("fsspec") + + +def make_instance(cls, args, kwargs): + return cls(*args, **kwargs) + + +class _Cached(type): + """ + Metaclass for caching file system instances. + + Notes + ----- + Instances are cached according to + + * The values of the class attributes listed in `_extra_tokenize_attributes` + * The arguments passed to ``__init__``. + + This creates an additional reference to the filesystem, which prevents the + filesystem from being garbage collected when all *user* references go away. + A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also* + be made for a filesystem instance to be garbage collected. + """ + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + # Note: we intentionally create a reference here, to avoid garbage + # collecting instances when all other references are gone. To really + # delete a FileSystem, the cache must be cleared. 
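+        # The cache is also per-process: __call__ clears it when the pid
+        # changes (e.g. after a fork).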
+ if conf.get("weakref_instance_cache"): # pragma: no cover + # debug option for analysing fork/spawn conditions + cls._cache = weakref.WeakValueDictionary() + else: + cls._cache = {} + cls._pid = os.getpid() + + def __call__(cls, *args, **kwargs): + kwargs = apply_config(cls, kwargs) + extra_tokens = tuple( + getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes + ) + token = tokenize( + cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs + ) + skip = kwargs.pop("skip_instance_cache", False) + if os.getpid() != cls._pid: + cls._cache.clear() + cls._pid = os.getpid() + if not skip and cls.cachable and token in cls._cache: + cls._latest = token + return cls._cache[token] + else: + obj = super().__call__(*args, **kwargs) + # Setting _fs_token here causes some static linters to complain. + obj._fs_token_ = token + obj.storage_args = args + obj.storage_options = kwargs + if obj.async_impl: + from .asyn import mirror_sync_methods + + mirror_sync_methods(obj) + + if cls.cachable and not skip: + cls._latest = token + cls._cache[token] = obj + return obj + + +class AbstractFileSystem(metaclass=_Cached): + """ + An abstract super-class for pythonic file-systems + + Implementations are expected to be compatible with or, better, subclass + from here. + """ + + cachable = True # this class can be cached, instances reused + _cached = False + blocksize = 2**22 + sep = "/" + protocol = "abstract" + _latest = None + async_impl = False + root_marker = "" # For some FSs, may require leading '/' or other character + + #: Extra *class attributes* that should be considered when hashing. + _extra_tokenize_attributes = () + + def __init__(self, *args, **storage_options): + """Create and configure file-system instance + + Instances may be cachable, so if similar enough arguments are seen + a new instance is not required. The token attribute exists to allow + implementations to cache instances if they wish. + + A reasonable default should be provided if there are no arguments. + + Subclasses should call this method. + + Parameters + ---------- + use_listings_cache, listings_expiry_time, max_paths: + passed to ``DirCache``, if the implementation supports + directory listing caching. Pass use_listings_cache=False + to disable such caching. + skip_instance_cache: bool + If this is a cachable implementation, pass True here to force + creating a new instance even if a matching instance exists, and prevent + storing this instance. 
+ asynchronous: bool + loop: asyncio-compatible IOLoop or None + """ + if self._cached: + # reusing instance, don't change + return + self._cached = True + self._intrans = False + self._transaction = None + self._invalidated_caches_in_transaction = [] + self.dircache = DirCache(**storage_options) + + if storage_options.pop("add_docs", None): + warnings.warn("add_docs is no longer supported.", FutureWarning) + + if storage_options.pop("add_aliases", None): + warnings.warn("add_aliases has been removed.", FutureWarning) + # This is set in _Cached + self._fs_token_ = None + + @property + def _fs_token(self): + return self._fs_token_ + + def __dask_tokenize__(self): + return self._fs_token + + def __hash__(self): + return int(self._fs_token, 16) + + def __eq__(self, other): + return isinstance(other, type(self)) and self._fs_token == other._fs_token + + def __reduce__(self): + return make_instance, (type(self), self.storage_args, self.storage_options) + + @classmethod + def _strip_protocol(cls, path): + """Turn path from fully-qualified to file-system-specific + + May require FS-specific handling, e.g., for relative paths or links. + """ + if isinstance(path, list): + return [cls._strip_protocol(p) for p in path] + path = stringify_path(path) + protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol + for protocol in protos: + if path.startswith(protocol + "://"): + path = path[len(protocol) + 3 :] + elif path.startswith(protocol + "::"): + path = path[len(protocol) + 2 :] + path = path.rstrip("/") + # use of root_marker to make minimum required path, e.g., "/" + return path or cls.root_marker + + def unstrip_protocol(self, name): + """Format FS-specific path to generic, including protocol""" + if isinstance(self.protocol, str): + if name.startswith(self.protocol): + return name + return self.protocol + "://" + name + else: + if name.startswith(tuple(self.protocol)): + return name + return self.protocol[0] + "://" + name + + @staticmethod + def _get_kwargs_from_urls(path): + """If kwargs can be encoded in the paths, extract them here + + This should happen before instantiation of the class; incoming paths + then should be amended to strip the options in methods. + + Examples may look like an sftp path "sftp://user@host:/my/path", where + the user and host should become kwargs and later get stripped. + """ + # by default, nothing happens + return {} + + @classmethod + def current(cls): + """Return the most recently instantiated FileSystem + + If no instance has been created, then create one with defaults + """ + if cls._latest in cls._cache: + return cls._cache[cls._latest] + return cls() + + @property + def transaction(self): + """A context within which files are committed together upon exit + + Requires the file class to implement `.commit()` and `.discard()` + for the normal and exception cases. + """ + if self._transaction is None: + self._transaction = Transaction(self) + return self._transaction + + def start_transaction(self): + """Begin write transaction for deferring files, non-context version""" + self._intrans = True + self._transaction = Transaction(self) + return self.transaction + + def end_transaction(self): + """Finish write transaction, non-context version""" + self.transaction.complete() + self._transaction = None + # The invalid cache must be cleared after the transcation is completed. 
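+        # (paths queued by invalidate_cache() while the transaction was open)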
+ for path in self._invalidated_caches_in_transaction: + self.invalidate_cache(path) + self._invalidated_caches_in_transaction.clear() + + def invalidate_cache(self, path=None): + """ + Discard any cached directory information + + Parameters + ---------- + path: string or None + If None, clear all listings cached else listings at or under given + path. + """ + # Not necessary to implement invalidation mechanism, may have no cache. + # But if have, you should call this method of parent class from your + # subclass to ensure expiring caches after transacations correctly. + # See the implementation of FTPFileSystem in ftp.py + if self._intrans: + self._invalidated_caches_in_transaction.append(path) + + def mkdir(self, path, create_parents=True, **kwargs): + """ + Create directory entry at path + + For systems that don't have true directories, may create an for + this instance only and not touch the real filesystem + + Parameters + ---------- + path: str + location + create_parents: bool + if True, this is equivalent to ``makedirs`` + kwargs: + may be permissions, etc. + """ + pass # not necessary to implement, may not have directories + + def makedirs(self, path, exist_ok=False): + """Recursively make directories + + Creates directory at path and any intervening required directories. + Raises exception if, for instance, the path already exists but is a + file. + + Parameters + ---------- + path: str + leaf directory name + exist_ok: bool (False) + If False, will error if the target already exists + """ + pass # not necessary to implement, may not have directories + + def rmdir(self, path): + """Remove a directory, if empty""" + pass # not necessary to implement, may not have directories + + def ls(self, path, detail=True, **kwargs): + """List objects at path. + + This should include subdirectories and files at that location. The + difference between a file and a directory must be clear when details + are requested. + + The specific keys, or perhaps a FileInfo class, or similar, is TBD, + but must be consistent across implementations. + Must include: + + - full path to the entry (without protocol) + - size of the entry, in bytes. If the value cannot be determined, will + be ``None``. + - type of entry, "file", "directory" or other + + Additional information + may be present, aproriate to the file-system, e.g., generation, + checksum, etc. + + May use refresh=True|False to allow use of self._ls_from_cache to + check for a saved listing and avoid calling the backend. This would be + common where listing may be expensive. + + Parameters + ---------- + path: str + detail: bool + if True, gives a list of dictionaries, where each is the same as + the result of ``info(path)``. If False, gives a list of paths + (str). + kwargs: may have additional backend-specific options, such as version + information + + Returns + ------- + List of strings if detail is False, or list of directory information + dicts if detail is True. + """ + raise NotImplementedError + + def _ls_from_cache(self, path): + """Check cache for listing + + Returns listing, if found (may me empty list for a directly that exists + but contains nothing), None if not in cache. 
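+
+        Raises ``FileNotFoundError`` if the parent directory was cached but
+        does not contain this path.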
+ """ + parent = self._parent(path) + if path.rstrip("/") in self.dircache: + return self.dircache[path.rstrip("/")] + try: + files = [ + f + for f in self.dircache[parent] + if f["name"] == path + or (f["name"] == path.rstrip("/") and f["type"] == "directory") + ] + if len(files) == 0: + # parent dir was listed but did not contain this file + raise FileNotFoundError(path) + return files + except KeyError: + pass + + def walk(self, path, maxdepth=None, **kwargs): + """Return all files belows path + + List all files, recursing into subdirectories; output is iterator-style, + like ``os.walk()``. For a simple list of files, ``find()`` is available. + + Note that the "files" outputted will include anything that is not + a directory, such as links. + + Parameters + ---------- + path: str + Root to recurse into + maxdepth: int + Maximum recursion depth. None means limitless, but not recommended + on link-based file-systems. + kwargs: passed to ``ls`` + """ + path = self._strip_protocol(path) + full_dirs = {} + dirs = {} + files = {} + + detail = kwargs.pop("detail", False) + try: + listing = self.ls(path, detail=True, **kwargs) + except (FileNotFoundError, IOError): + if detail: + return path, {}, {} + return path, [], [] + + for info in listing: + # each info name must be at least [path]/part , but here + # we check also for names like [path]/part/ + pathname = info["name"].rstrip("/") + name = pathname.rsplit("/", 1)[-1] + if info["type"] == "directory" and pathname != path: + # do not include "self" path + full_dirs[pathname] = info + dirs[name] = info + elif pathname == path: + # file-like with same name as give path + files[""] = info + else: + files[name] = info + + if detail: + yield path, dirs, files + else: + yield path, list(dirs), list(files) + + if maxdepth is not None: + maxdepth -= 1 + if maxdepth < 1: + return + + for d in full_dirs: + yield from self.walk(d, maxdepth=maxdepth, detail=detail, **kwargs) + + def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + """List all files below path. + + Like posix ``find`` command without conditions + + Parameters + ---------- + path : str + maxdepth: int or None + If not None, the maximum number of levels to descend + withdirs: bool + Whether to include directory paths in the output. This is True + when used by glob, but users usually only want files. + kwargs are passed to ``ls``. + """ + # TODO: allow equivalent of -name parameter + path = self._strip_protocol(path) + out = dict() + for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs): + if withdirs: + files.update(dirs) + out.update({info["name"]: info for name, info in files.items()}) + if not out and self.isfile(path): + # walk works on directories, but find should also return [path] + # when path happens to be a file + out[path] = {} + names = sorted(out) + if not detail: + return names + else: + return {name: out[name] for name in names} + + def du(self, path, total=True, maxdepth=None, **kwargs): + """Space used by files within a path + + Parameters + ---------- + path: str + total: bool + whether to sum all the file sizes + maxdepth: int or None + maximum number of directory levels to descend, None for unlimited. + kwargs: passed to ``ls`` + + Returns + ------- + Dict of {fn: size} if total=False, or int otherwise, where numbers + refer to bytes used. 
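+
+        For example (hypothetical paths)::
+
+            >>> fs.du("data", total=False)  # doctest: +SKIP
+            {'data/a.csv': 100, 'data/b.csv': 250}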
+ """ + sizes = {} + for f in self.find(path, maxdepth=maxdepth, **kwargs): + info = self.info(f) + sizes[info["name"]] = info["size"] + if total: + return sum(sizes.values()) + else: + return sizes + + def glob(self, path, **kwargs): + """ + Find files by glob-matching. + + If the path ends with '/' and does not contain "*", it is essentially + the same as ``ls(path)``, returning only files. + + We support ``"**"``, + ``"?"`` and ``"[..]"``. We do not support ^ for pattern negation. + + Search path names that contain embedded characters special to this + implementation of glob may not produce expected results; + e.g., 'foo/bar/*starredfilename*'. + + kwargs are passed to ``ls``. + """ + import re + + ends = path.endswith("/") + path = self._strip_protocol(path) + indstar = path.find("*") if path.find("*") >= 0 else len(path) + indques = path.find("?") if path.find("?") >= 0 else len(path) + indbrace = path.find("[") if path.find("[") >= 0 else len(path) + + ind = min(indstar, indques, indbrace) + + detail = kwargs.pop("detail", False) + + if not has_magic(path): + root = path + depth = 1 + if ends: + path += "/*" + elif self.exists(path): + if not detail: + return [path] + else: + return {path: self.info(path)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:ind]: + ind2 = path[:ind].rindex("/") + root = path[: ind2 + 1] + depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1 + else: + root = "" + depth = None if "**" in path else path[ind + 1 :].count("/") + 1 + + allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs) + # Escape characters special to python regex, leaving our supported + # special characters in place. + # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html + # for shell globbing details. + pattern = ( + "^" + + ( + path.replace("\\", r"\\") + .replace(".", r"\.") + .replace("+", r"\+") + .replace("//", "/") + .replace("(", r"\(") + .replace(")", r"\)") + .replace("|", r"\|") + .replace("^", r"\^") + .replace("$", r"\$") + .replace("{", r"\{") + .replace("}", r"\}") + .rstrip("/") + .replace("?", ".") + ) + + "$" + ) + pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern) + pattern = re.sub("[*]", "[^/]*", pattern) + pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*")) + out = { + p: allpaths[p] + for p in sorted(allpaths) + if pattern.match(p.replace("//", "/").rstrip("/")) + } + if detail: + return out + else: + return list(out) + + def exists(self, path, **kwargs): + """Is there a file at the given path""" + try: + self.info(path, **kwargs) + return True + except: # noqa: E722 + # any exception allowed bar FileNotFoundError? + return False + + def lexists(self, path, **kwargs): + """If there is a file at the given path (including + broken links)""" + return self.exists(path) + + def info(self, path, **kwargs): + """Give details of entry at path + + Returns a single dictionary, with exactly the same information as ``ls`` + would with ``detail=True``. + + The default implementation should calls ls and could be overridden by a + shortcut. kwargs are passed on to ```ls()``. + + Some file systems might not be able to measure the file's size, in + which case, the returned dict will include ``'size': None``. + + Returns + ------- + dict with keys: name (full path in the FS), size (in bytes), type (file, + directory, or something else) and other FS-specific keys. 
+ """ + path = self._strip_protocol(path) + out = self.ls(self._parent(path), detail=True, **kwargs) + out = [o for o in out if o["name"].rstrip("/") == path] + if out: + return out[0] + out = self.ls(path, detail=True, **kwargs) + path = path.rstrip("/") + out1 = [o for o in out if o["name"].rstrip("/") == path] + if len(out1) == 1: + if "size" not in out1[0]: + out1[0]["size"] = None + return out1[0] + elif len(out1) > 1 or out: + return {"name": path, "size": 0, "type": "directory"} + else: + raise FileNotFoundError(path) + + def checksum(self, path): + """Unique value for current version of file + + If the checksum is the same from one moment to another, the contents + are guaranteed to be the same. If the checksum changes, the contents + *might* have changed. + + This should normally be overridden; default will probably capture + creation/modification timestamp (which would be good) or maybe + access timestamp (which would be bad) + """ + return int(tokenize(self.info(path)), 16) + + def size(self, path): + """Size in bytes of file""" + return self.info(path).get("size", None) + + def sizes(self, paths): + """Size in bytes of each file in a list of paths""" + return [self.size(p) for p in paths] + + def isdir(self, path): + """Is this entry directory-like?""" + try: + return self.info(path)["type"] == "directory" + except IOError: + return False + + def isfile(self, path): + """Is this entry file-like?""" + try: + return self.info(path)["type"] == "file" + except: # noqa: E722 + return False + + def cat_file(self, path, start=None, end=None, **kwargs): + """Get the content of a file + + Parameters + ---------- + path: URL of file on this filesystems + start, end: int + Bytes limits of the read. If negative, backwards from end, + like usual python slices. Either can be None for start or + end of file, respectively + kwargs: passed to ``open()``. + """ + # explicitly set buffering off? + with self.open(path, "rb", **kwargs) as f: + if start is not None: + if start >= 0: + f.seek(start) + else: + f.seek(max(0, f.size + start)) + if end is not None: + if end < 0: + end = f.size + end + return f.read(end - f.tell()) + return f.read() + + def pipe_file(self, path, value, **kwargs): + """Set the bytes of given file""" + with self.open(path, "wb") as f: + f.write(value) + + def pipe(self, path, value=None, **kwargs): + """Put value into path + + (counterpart to ``cat``) + + Parameters + ---------- + path: string or dict(str, bytes) + If a string, a single remote location to put ``value`` bytes; if a dict, + a mapping of {path: bytesvalue}. + value: bytes, optional + If using a single path, these are the bytes to put there. 
+            Ignored if ``path`` is a dict
+        """
+        if isinstance(path, str):
+            self.pipe_file(self._strip_protocol(path), value, **kwargs)
+        elif isinstance(path, dict):
+            for k, v in path.items():
+                self.pipe_file(self._strip_protocol(k), v, **kwargs)
+        else:
+            raise ValueError("path must be str or dict")
+
+    def cat_ranges(self, paths, starts, ends, max_gap=None, **kwargs):
+        if max_gap is not None:
+            raise NotImplementedError
+        if not isinstance(paths, list):
+            raise TypeError
+        if not isinstance(starts, list):
+            starts = [starts] * len(paths)
+        if not isinstance(ends, list):
+            # broadcast a scalar end offset across all paths, as for starts
+            ends = [ends] * len(paths)
+        if len(starts) != len(paths) or len(ends) != len(paths):
+            raise ValueError
+        return [self.cat_file(p, s, e) for p, s, e in zip(paths, starts, ends)]
+
+    def cat(self, path, recursive=False, on_error="raise", **kwargs):
+        """Fetch (potentially multiple) paths' contents
+
+        Parameters
+        ----------
+        recursive: bool
+            If True, assume the path(s) are directories, and get all the
+            contained files
+        on_error : "raise", "omit", "return"
+            If raise, an underlying exception will be raised (converted to KeyError
+            if the type is in self.missing_exceptions); if omit, keys with exception
+            will simply not be included in the output; if "return", all keys are
+            included in the output, but the value will be bytes or an exception
+            instance.
+        kwargs: passed to cat_file
+
+        Returns
+        -------
+        dict of {path: contents} if there are multiple paths
+        or the path has been otherwise expanded
+        """
+        paths = self.expand_path(path, recursive=recursive)
+        if (
+            len(paths) > 1
+            or isinstance(path, list)
+            or paths[0] != self._strip_protocol(path)
+        ):
+            out = {}
+            for path in paths:
+                try:
+                    out[path] = self.cat_file(path, **kwargs)
+                except Exception as e:
+                    if on_error == "raise":
+                        raise
+                    if on_error == "return":
+                        out[path] = e
+            return out
+        else:
+            return self.cat_file(paths[0], **kwargs)
+
+    def get_file(
+        self, rpath, lpath, callback=_DEFAULT_CALLBACK, outfile=None, **kwargs
+    ):
+        """Copy single remote file to local"""
+        if isfilelike(lpath):
+            outfile = lpath
+        else:
+            if self.isdir(rpath):
+                os.makedirs(lpath, exist_ok=True)
+                return None
+
+        if outfile is None:
+            outfile = open(lpath, "wb")
+
+        with self.open(rpath, "rb", **kwargs) as f1:
+            callback.set_size(getattr(f1, "size", None))
+            data = True
+            while data:
+                data = f1.read(self.blocksize)
+                segment_len = outfile.write(data)
+                callback.relative_update(segment_len)
+        if not isfilelike(lpath):
+            outfile.close()
+
+    def get(self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs):
+        """Copy file(s) to local.
+
+        Copies a specific file or tree of files (if recursive=True). If lpath
+        ends with a "/", it will be assumed to be a directory, and target files
+        will go within. Can submit a list of paths, which may be glob-patterns
+        and will be expanded.
+
+        Calls get_file for each source.
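+
+        Examples
+        --------
+        A sketch with hypothetical paths; the trailing slash marks the
+        local target as a directory:
+
+        >>> fs.get("remote/data/file.csv", "local/")  # doctest: +SKIP
+        >>> fs.get("remote/data/", "local/", recursive=True)  # doctest: +SKIP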
+ """ + from .implementations.local import make_path_posix + + if isinstance(lpath, str): + lpath = make_path_posix(lpath) + rpaths = self.expand_path(rpath, recursive=recursive) + lpaths = other_paths(rpaths, lpath) + + callback.set_size(len(lpaths)) + for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): + callback.branch(rpath, lpath, kwargs) + self.get_file(rpath, lpath, **kwargs) + + def put_file(self, lpath, rpath, callback=_DEFAULT_CALLBACK, **kwargs): + """Copy single file to remote""" + if os.path.isdir(lpath): + self.makedirs(rpath, exist_ok=True) + return None + + with open(lpath, "rb") as f1: + size = f1.seek(0, 2) + callback.set_size(size) + f1.seek(0) + + self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True) + with self.open(rpath, "wb", **kwargs) as f2: + while f1.tell() < size: + data = f1.read(self.blocksize) + segment_len = f2.write(data) + callback.relative_update(segment_len) + + def put(self, lpath, rpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs): + """Copy file(s) from local. + + Copies a specific file or tree of files (if recursive=True). If rpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. + + Calls put_file for each source. + """ + from .implementations.local import LocalFileSystem, make_path_posix + + rpath = ( + self._strip_protocol(rpath) + if isinstance(rpath, str) + else [self._strip_protocol(p) for p in rpath] + ) + if isinstance(lpath, str): + lpath = make_path_posix(lpath) + fs = LocalFileSystem() + lpaths = fs.expand_path(lpath, recursive=recursive) + rpaths = other_paths( + lpaths, rpath, exists=isinstance(rpath, str) and self.isdir(rpath) + ) + + callback.set_size(len(rpaths)) + for lpath, rpath in callback.wrap(zip(lpaths, rpaths)): + callback.branch(lpath, rpath, kwargs) + self.put_file(lpath, rpath, **kwargs) + + def head(self, path, size=1024): + """Get the first ``size`` bytes from file""" + with self.open(path, "rb") as f: + return f.read(size) + + def tail(self, path, size=1024): + """Get the last ``size`` bytes from file""" + with self.open(path, "rb") as f: + f.seek(max(-size, -f.size), 2) + return f.read() + + def cp_file(self, path1, path2, **kwargs): + raise NotImplementedError + + def copy(self, path1, path2, recursive=False, on_error=None, **kwargs): + """Copy within two locations in the filesystem + + on_error : "raise", "ignore" + If raise, any not-found exceptions will be raised; if ignore any + not-found exceptions will cause the path to be skipped; defaults to + raise unless recursive is true, where the default is ignore + """ + if on_error is None and recursive: + on_error = "ignore" + elif on_error is None: + on_error = "raise" + + paths = self.expand_path(path1, recursive=recursive) + path2 = other_paths(paths, path2) + for p1, p2 in zip(paths, path2): + try: + self.cp_file(p1, p2, **kwargs) + except FileNotFoundError: + if on_error == "raise": + raise + + def expand_path(self, path, recursive=False, maxdepth=None): + """Turn one or more globs or directories into a list of all matching paths + to files or directories.""" + if isinstance(path, str): + out = self.expand_path([path], recursive, maxdepth) + else: + # reduce depth on each recursion level unless None or 0 + maxdepth = maxdepth if not maxdepth else maxdepth - 1 + out = set() + path = [self._strip_protocol(p) for p in path] + for p in path: + if has_magic(p): + bit = set(self.glob(p)) + out |= bit + if recursive: + out |= set( + self.expand_path( + list(bit), recursive=recursive, maxdepth=maxdepth + ) + ) 
+ continue + elif recursive: + rec = set(self.find(p, maxdepth=maxdepth, withdirs=True)) + out |= rec + if p not in out and (recursive is False or self.exists(p)): + # should only check once, for the root + out.add(p) + if not out: + raise FileNotFoundError(path) + return list(sorted(out)) + + def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs): + """Move file(s) from one location to another""" + self.copy(path1, path2, recursive=recursive, maxdepth=maxdepth) + self.rm(path1, recursive=recursive) + + def rm_file(self, path): + """Delete a file""" + self._rm(path) + + def _rm(self, path): + """Delete one file""" + # this is the old name for the method, prefer rm_file + raise NotImplementedError + + def rm(self, path, recursive=False, maxdepth=None): + """Delete files. + + Parameters + ---------- + path: str or list of str + File(s) to delete. + recursive: bool + If file(s) are directories, recursively delete contents and then + also remove the directory + maxdepth: int or None + Depth to pass to walk for finding files to delete, if recursive. + If None, there will be no limit and infinite recursion may be + possible. + """ + path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth) + for p in reversed(path): + self.rm_file(p) + + @classmethod + def _parent(cls, path): + path = cls._strip_protocol(path.rstrip("/")) + if "/" in path: + parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker) + return cls.root_marker + parent + else: + return cls.root_marker + + def _open( + self, + path, + mode="rb", + block_size=None, + autocommit=True, + cache_options=None, + **kwargs, + ): + """Return raw bytes-mode file-like from the file-system""" + return AbstractBufferedFile( + self, + path, + mode, + block_size, + autocommit, + cache_options=cache_options, + **kwargs, + ) + + def open( + self, + path, + mode="rb", + block_size=None, + cache_options=None, + compression=None, + **kwargs, + ): + """ + Return a file-like object from the filesystem + + The resultant instance must function correctly in a context ``with`` + block. + + Parameters + ---------- + path: str + Target file + mode: str like 'rb', 'w' + See builtin ``open()`` + block_size: int + Some indication of buffering - this is a value in bytes + cache_options : dict, optional + Extra arguments to pass through to the cache. + compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. 
+ encoding, errors, newline: passed on to TextIOWrapper for text mode + """ + import io + + path = self._strip_protocol(path) + if "b" not in mode: + mode = mode.replace("t", "") + "b" + + text_kwargs = { + k: kwargs.pop(k) + for k in ["encoding", "errors", "newline"] + if k in kwargs + } + return io.TextIOWrapper( + self.open( + path, + mode, + block_size=block_size, + cache_options=cache_options, + compression=compression, + **kwargs, + ), + **text_kwargs, + ) + else: + ac = kwargs.pop("autocommit", not self._intrans) + f = self._open( + path, + mode=mode, + block_size=block_size, + autocommit=ac, + cache_options=cache_options, + **kwargs, + ) + if compression is not None: + from fsspec.compression import compr + from fsspec.core import get_compression + + compression = get_compression(path, compression) + compress = compr[compression] + f = compress(f, mode=mode[0]) + + if not ac and "r" not in mode: + self.transaction.files.append(f) + return f + + def touch(self, path, truncate=True, **kwargs): + """Create empty file, or update timestamp + + Parameters + ---------- + path: str + file location + truncate: bool + If True, always set file size to 0; if False, update timestamp and + leave file unchanged, if backend allows this + """ + if truncate or not self.exists(path): + with self.open(path, "wb", **kwargs): + pass + else: + raise NotImplementedError # update timestamp, if possible + + def ukey(self, path): + """Hash of file properties, to tell if it has changed""" + return sha256(str(self.info(path)).encode()).hexdigest() + + def read_block(self, fn, offset, length, delimiter=None): + """Read a block of bytes from + + Starting at ``offset`` of the file, read ``length`` bytes. If + ``delimiter`` is set then we ensure that the read starts and stops at + delimiter boundaries that follow the locations ``offset`` and ``offset + + length``. If ``offset`` is zero then we start at zero. The + bytestring returned WILL include the end delimiter string. + + If offset+length is beyond the eof, reads to eof. + + Parameters + ---------- + fn: string + Path to filename + offset: int + Byte offset to start read + length: int + Number of bytes to read + delimiter: bytes (optional) + Ensure reading starts and stops at delimiter bytestring + + Examples + -------- + >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP + b'Alice, 100\\nBo' + >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\n' + + Use ``length=None`` to read to the end of the file. + >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\nCharlie, 300' + + See Also + -------- + utils.read_block + """ + with self.open(fn, "rb") as f: + size = f.size + if length is None: + length = size + if size is not None and offset + length > size: + length = size - offset + return read_block(f, offset, length, delimiter) + + def to_json(self): + """ + JSON representation of this filesystem instance + + Returns + ------- + str: JSON structure with keys cls (the python location of this class), + protocol (text name of this class's protocol, first one in case of + multiple), args (positional args, usually empty), and all other + kwargs as their own keys. 
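+
+        Examples
+        --------
+        A sketch, assuming a memory-backed instance; the exact string
+        depends on the stored options:
+
+        >>> fs.to_json()  # doctest: +SKIP
+        '{"cls": "fsspec.implementations.memory.MemoryFileSystem", "protocol": "memory", "args": []}'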
+ """ + import json + + cls = type(self) + cls = ".".join((cls.__module__, cls.__name__)) + proto = ( + self.protocol[0] + if isinstance(self.protocol, (tuple, list)) + else self.protocol + ) + return json.dumps( + dict( + **{"cls": cls, "protocol": proto, "args": self.storage_args}, + **self.storage_options, + ) + ) + + @staticmethod + def from_json(blob): + """ + Recreate a filesystem instance from JSON representation + + See ``.to_json()`` for the expected structure of the input + + Parameters + ---------- + blob: str + + Returns + ------- + file system instance, not necessarily of this particular class. + """ + import json + + from .registry import _import_class, get_filesystem_class + + dic = json.loads(blob) + protocol = dic.pop("protocol") + try: + cls = _import_class(dic.pop("cls")) + except (ImportError, ValueError, RuntimeError, KeyError): + cls = get_filesystem_class(protocol) + return cls(*dic.pop("args", ()), **dic) + + def _get_pyarrow_filesystem(self): + """ + Make a version of the FS instance which will be acceptable to pyarrow + """ + # all instances already also derive from pyarrow + return self + + def get_mapper(self, root="", check=False, create=False, missing_exceptions=None): + """Create key/value store based on this file-system + + Makes a MutableMapping interface to the FS at the given root path. + See ``fsspec.mapping.FSMap`` for further details. + """ + from .mapping import FSMap + + return FSMap( + root, + self, + check=check, + create=create, + missing_exceptions=missing_exceptions, + ) + + @classmethod + def clear_instance_cache(cls): + """ + Clear the cache of filesystem instances. + + Notes + ----- + Unless overridden by setting the ``cachable`` class attribute to False, + the filesystem class stores a reference to newly created instances. This + prevents Python's normal rules around garbage collection from working, + since the instances refcount will not drop to zero until + ``clear_instance_cache`` is called. 
+ """ + cls._cache.clear() + + def created(self, path): + """Return the created timestamp of a file as a datetime.datetime""" + raise NotImplementedError + + def modified(self, path): + """Return the modified timestamp of a file as a datetime.datetime""" + raise NotImplementedError + + # ------------------------------------------------------------------------ + # Aliases + + def makedir(self, path, create_parents=True, **kwargs): + """Alias of `AbstractFileSystem.mkdir`.""" + return self.mkdir(path, create_parents=create_parents, **kwargs) + + def mkdirs(self, path, exist_ok=False): + """Alias of `AbstractFileSystem.makedirs`.""" + return self.makedirs(path, exist_ok=exist_ok) + + def listdir(self, path, detail=True, **kwargs): + """Alias of `AbstractFileSystem.ls`.""" + return self.ls(path, detail=detail, **kwargs) + + def cp(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.copy`.""" + return self.copy(path1, path2, **kwargs) + + def move(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.mv`.""" + return self.mv(path1, path2, **kwargs) + + def stat(self, path, **kwargs): + """Alias of `AbstractFileSystem.info`.""" + return self.info(path, **kwargs) + + def disk_usage(self, path, total=True, maxdepth=None, **kwargs): + """Alias of `AbstractFileSystem.du`.""" + return self.du(path, total=total, maxdepth=maxdepth, **kwargs) + + def rename(self, path1, path2, **kwargs): + """Alias of `AbstractFileSystem.mv`.""" + return self.mv(path1, path2, **kwargs) + + def delete(self, path, recursive=False, maxdepth=None): + """Alias of `AbstractFileSystem.rm`.""" + return self.rm(path, recursive=recursive, maxdepth=maxdepth) + + def upload(self, lpath, rpath, recursive=False, **kwargs): + """Alias of `AbstractFileSystem.put`.""" + return self.put(lpath, rpath, recursive=recursive, **kwargs) + + def download(self, rpath, lpath, recursive=False, **kwargs): + """Alias of `AbstractFileSystem.get`.""" + return self.get(rpath, lpath, recursive=recursive, **kwargs) + + def sign(self, path, expiration=100, **kwargs): + """Create a signed URL representing the given path + + Some implementations allow temporary URLs to be generated, as a + way of delegating credentials. + + Parameters + ---------- + path : str + The path on the filesystem + expiration : int + Number of seconds to enable the URL for (if supported) + + Returns + ------- + URL : str + The signed URL + + Raises + ------ + NotImplementedError : if method is not implemented for a filesystem + """ + raise NotImplementedError("Sign is not implemented for this filesystem") + + def _isfilestore(self): + # Originally inherited from pyarrow DaskFileSystem. Keeping this + # here for backwards compatibility as long as pyarrow uses its + # legacy fsspec-compatible filesystems and thus accepts fsspec + # filesystems as well + return False + + +class AbstractBufferedFile(io.IOBase): + """Convenient class to derive from to provide buffering + + In the case that the backend does not provide a pythonic file-like object + already, this class contains much of the logic to build one. The only + methods that need to be overridden are ``_upload_chunk``, + ``_initiate_upload`` and ``_fetch_range``. 
+ """ + + DEFAULT_BLOCK_SIZE = 5 * 2**20 + _details = None + + def __init__( + self, + fs, + path, + mode="rb", + block_size="default", + autocommit=True, + cache_type="readahead", + cache_options=None, + size=None, + **kwargs, + ): + """ + Template for files with buffered reading and writing + + Parameters + ---------- + fs: instance of FileSystem + path: str + location in file-system + mode: str + Normal file modes. Currently only 'wb', 'ab' or 'rb'. Some file + systems may be read-only, and some may not support append. + block_size: int + Buffer size for reading or writing, 'default' for class default + autocommit: bool + Whether to write to final destination; may only impact what + happens when file is being closed. + cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead" + Caching policy in read mode. See the definitions in ``core``. + cache_options : dict + Additional options passed to the constructor for the cache specified + by `cache_type`. + size: int + If given and in read mode, suppressed having to look up the file size + kwargs: + Gets stored as self.kwargs + """ + from .core import caches + + self.path = path + self.fs = fs + self.mode = mode + self.blocksize = ( + self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size + ) + self.loc = 0 + self.autocommit = autocommit + self.end = None + self.start = None + self.closed = False + + if cache_options is None: + cache_options = {} + + if "trim" in kwargs: + warnings.warn( + "Passing 'trim' to control the cache behavior has been deprecated. " + "Specify it within the 'cache_options' argument instead.", + FutureWarning, + ) + cache_options["trim"] = kwargs.pop("trim") + + self.kwargs = kwargs + + if mode not in {"ab", "rb", "wb"}: + raise NotImplementedError("File mode not supported") + if mode == "rb": + if size is not None: + self.size = size + else: + self.size = self.details["size"] + self.cache = caches[cache_type]( + self.blocksize, self._fetch_range, self.size, **cache_options + ) + else: + self.buffer = io.BytesIO() + self.offset = None + self.forced = False + self.location = None + + @property + def details(self): + if self._details is None: + self._details = self.fs.info(self.path) + return self._details + + @details.setter + def details(self, value): + self._details = value + self.size = value["size"] + + @property + def full_name(self): + return _unstrip_protocol(self.path, self.fs) + + @property + def closed(self): + # get around this attr being read-only in IOBase + # use getattr here, since this can be called during del + return getattr(self, "_closed", True) + + @closed.setter + def closed(self, c): + self._closed = c + + def __hash__(self): + if "w" in self.mode: + return id(self) + else: + return int(tokenize(self.details), 16) + + def __eq__(self, other): + """Files are equal if they have the same checksum, only in read mode""" + return self.mode == "rb" and other.mode == "rb" and hash(self) == hash(other) + + def commit(self): + """Move from temp to final destination""" + + def discard(self): + """Throw away temporary file""" + + def info(self): + """File information about this path""" + if "r" in self.mode: + return self.details + else: + raise ValueError("Info not available while writing") + + def tell(self): + """Current file location""" + return self.loc + + def seek(self, loc, whence=0): + """Set current file location + + Parameters + ---------- + loc: int + byte location + whence: {0, 1, 2} + from start of file, current location or end of file, resp. 
+ """ + loc = int(loc) + if not self.mode == "rb": + raise OSError(ESPIPE, "Seek only available in read mode") + if whence == 0: + nloc = loc + elif whence == 1: + nloc = self.loc + loc + elif whence == 2: + nloc = self.size + loc + else: + raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % whence) + if nloc < 0: + raise ValueError("Seek before start of file") + self.loc = nloc + return self.loc + + def write(self, data): + """ + Write data to buffer. + + Buffer only sent on flush() or if buffer is greater than + or equal to blocksize. + + Parameters + ---------- + data: bytes + Set of bytes to be written. + """ + if self.mode not in {"wb", "ab"}: + raise ValueError("File not in write mode") + if self.closed: + raise ValueError("I/O operation on closed file.") + if self.forced: + raise ValueError("This file has been force-flushed, can only close") + out = self.buffer.write(data) + self.loc += out + if self.buffer.tell() >= self.blocksize: + self.flush() + return out + + def flush(self, force=False): + """ + Write buffered data to backend store. + + Writes the current buffer, if it is larger than the block-size, or if + the file is being closed. + + Parameters + ---------- + force: bool + When closing, write the last block even if it is smaller than + blocks are allowed to be. Disallows further writing to this file. + """ + + if self.closed: + raise ValueError("Flush on closed file") + if force and self.forced: + raise ValueError("Force flush cannot be called more than once") + if force: + self.forced = True + + if self.mode not in {"wb", "ab"}: + # no-op to flush on read-mode + return + + if not force and self.buffer.tell() < self.blocksize: + # Defer write on small block + return + + if self.offset is None: + # Initialize a multipart upload + self.offset = 0 + try: + self._initiate_upload() + except: # noqa: E722 + self.closed = True + raise + + if self._upload_chunk(final=force) is not False: + self.offset += self.buffer.seek(0, 2) + self.buffer = io.BytesIO() + + def _upload_chunk(self, final=False): + """Write one part of a multi-block file upload + + Parameters + ========== + final: bool + This is the last block, so should complete file, if + self.autocommit is True. + """ + # may not yet have been initialized, may need to call _initialize_upload + + def _initiate_upload(self): + """Create remote file/upload""" + pass + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + raise NotImplementedError + + def read(self, length=-1): + """ + Return data from cache, or fetch pieces as necessary + + Parameters + ---------- + length: int (-1) + Number of bytes to read; if <0, all remaining bytes. 
+ """ + length = -1 if length is None else int(length) + if self.mode != "rb": + raise ValueError("File not in read mode") + if length < 0: + length = self.size - self.loc + if self.closed: + raise ValueError("I/O operation on closed file.") + logger.debug("%s read: %i - %i" % (self, self.loc, self.loc + length)) + if length == 0: + # don't even bother calling fetch + return b"" + out = self.cache._fetch(self.loc, self.loc + length) + self.loc += len(out) + return out + + def readinto(self, b): + """mirrors builtin file's readinto method + + https://docs.python.org/3/library/io.html#io.RawIOBase.readinto + """ + out = memoryview(b).cast("B") + data = self.read(out.nbytes) + out[: len(data)] = data + return len(data) + + def readuntil(self, char=b"\n", blocks=None): + """Return data between current position and first occurrence of char + + char is included in the output, except if the end of the tile is + encountered first. + + Parameters + ---------- + char: bytes + Thing to find + blocks: None or int + How much to read in each go. Defaults to file blocksize - which may + mean a new read on every call. + """ + out = [] + while True: + start = self.tell() + part = self.read(blocks or self.blocksize) + if len(part) == 0: + break + found = part.find(char) + if found > -1: + out.append(part[: found + len(char)]) + self.seek(start + found + len(char)) + break + out.append(part) + return b"".join(out) + + def readline(self): + """Read until first occurrence of newline character + + Note that, because of character encoding, this is not necessarily a + true line ending. + """ + return self.readuntil(b"\n") + + def __next__(self): + out = self.readline() + if out: + return out + raise StopIteration + + def __iter__(self): + return self + + def readlines(self): + """Return all data, split by the newline character""" + data = self.read() + lines = data.split(b"\n") + out = [l + b"\n" for l in lines[:-1]] + if data.endswith(b"\n"): + return out + else: + return out + [lines[-1]] + # return list(self) ??? 
+
+    def readinto1(self, b):
+        return self.readinto(b)
+
+    def close(self):
+        """Close file
+
+        Finalizes writes, discards cache
+        """
+        if getattr(self, "_unclosable", False):
+            return
+        if self.closed:
+            return
+        if self.mode == "rb":
+            self.cache = None
+        else:
+            if not self.forced:
+                self.flush(force=True)
+
+            if self.fs is not None:
+                self.fs.invalidate_cache(self.path)
+                self.fs.invalidate_cache(self.fs._parent(self.path))
+
+        self.closed = True
+
+    def readable(self):
+        """Whether opened for reading"""
+        return self.mode == "rb" and not self.closed
+
+    def seekable(self):
+        """Whether is seekable (only in read mode)"""
+        return self.readable()
+
+    def writable(self):
+        """Whether opened for writing"""
+        return self.mode in {"wb", "ab"} and not self.closed
+
+    def __del__(self):
+        if not self.closed:
+            self.close()
+
+    def __str__(self):
+        return "<File-like object %s, %s>" % (type(self.fs).__name__, self.path)
+
+    __repr__ = __str__
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/transaction.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/transaction.py
new file mode 100644
index 0000000000000000000000000000000000000000..df98353d5754fc6b82a6d06d80b87e45ed698f1f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/transaction.py
@@ -0,0 +1,81 @@
+class Transaction(object):
+    """Filesystem transaction write context
+
+    Gathers files for deferred commit or discard, so that several write
+    operations can be finalized semi-atomically. This works by having this
+    instance as the ``.transaction`` attribute of the given filesystem
+    """
+
+    def __init__(self, fs):
+        """
+        Parameters
+        ----------
+        fs: FileSystem instance
+        """
+        self.fs = fs
+        self.files = []
+
+    def __enter__(self):
+        self.start()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """End transaction and commit, if exit is not due to exception"""
+        # only commit if there was no exception
+        self.complete(commit=exc_type is None)
+        self.fs._intrans = False
+        self.fs._transaction = None
+
+    def start(self):
+        """Start a transaction on this FileSystem"""
+        self.files = []  # clean up after previous failed completions
+        self.fs._intrans = True
+
+    def complete(self, commit=True):
+        """Finish transaction: commit or discard all deferred files"""
+        for f in self.files:
+            if commit:
+                f.commit()
+            else:
+                f.discard()
+        self.files = []
+        self.fs._intrans = False
+
+
+class FileActor(object):
+    def __init__(self):
+        self.files = []
+
+    def commit(self):
+        for f in self.files:
+            f.commit()
+        self.files.clear()
+
+    def discard(self):
+        for f in self.files:
+            f.discard()
+        self.files.clear()
+
+    def append(self, f):
+        self.files.append(f)
+
+
+class DaskTransaction(Transaction):
+    def __init__(self, fs):
+        """
+        Parameters
+        ----------
+        fs: FileSystem instance
+        """
+        import distributed
+
+        super().__init__(fs)
+        client = distributed.default_client()
+        self.files = client.submit(FileActor, actor=True).result()
+
+    def complete(self, commit=True):
+        """Finish transaction: commit or discard all deferred files"""
+        if commit:
+            self.files.commit().result()
+        else:
+            self.files.discard().result()
+        self.fs._intrans = False
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/utils.py
new file mode 100644
index
0000000000000000000000000000000000000000..e35fe737e9dba5a31591a7651c3f3b4da0071dd2
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/fsspec/utils.py
@@ -0,0 +1,621 @@
+import logging
+import math
+import os
+import pathlib
+import re
+import sys
+from contextlib import contextmanager
+from functools import partial
+from hashlib import md5
+from types import TracebackType
+from typing import IO, AnyStr, Callable, Iterable, Iterator, List, Optional, Type
+from urllib.parse import urlsplit
+
+DEFAULT_BLOCK_SIZE = 5 * 2**20
+
+
+def infer_storage_options(urlpath, inherit_storage_options=None):
+    """Infer storage options from URL path and merge it with existing storage
+    options.
+
+    Parameters
+    ----------
+    urlpath: str or unicode
+        Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
+    inherit_storage_options: dict (optional)
+        Its contents will get merged with the inferred information from the
+        given path
+
+    Returns
+    -------
+    Storage options dict.
+
+    Examples
+    --------
+    >>> infer_storage_options('/mnt/datasets/test.csv')  # doctest: +SKIP
+    {"protocol": "file", "path": "/mnt/datasets/test.csv"}
+    >>> infer_storage_options(
+    ...     'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
+    ...     inherit_storage_options={'extra': 'value'},
+    ... )  # doctest: +SKIP
+    {"protocol": "hdfs", "username": "username", "password": "pwd",
+    "host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
+    "url_query": "q=1", "extra": "value"}
+    """
+    # Handle Windows paths including disk name in this special case
+    if (
+        re.match(r"^[a-zA-Z]:[\\/]", urlpath)
+        or re.match(r"^[a-zA-Z0-9]+://", urlpath) is None
+    ):
+        return {"protocol": "file", "path": urlpath}
+
+    parsed_path = urlsplit(urlpath)
+    protocol = parsed_path.scheme or "file"
+    if parsed_path.fragment:
+        path = "#".join([parsed_path.path, parsed_path.fragment])
+    else:
+        path = parsed_path.path
+    if protocol == "file":
+        # Special case parsing file protocol URL on Windows according to:
+        # https://msdn.microsoft.com/en-us/library/jj710207.aspx
+        windows_path = re.match(r"^/([a-zA-Z])[:|]([\\/].*)$", path)
+        if windows_path:
+            path = "%s:%s" % windows_path.groups()
+
+    if protocol in ["http", "https"]:
+        # for HTTP, we don't want to parse, as requests will anyway
+        return {"protocol": protocol, "path": urlpath}
+
+    options = {"protocol": protocol, "path": path}
+
+    if parsed_path.netloc:
+        # Parse `hostname` from netloc manually because `parsed_path.hostname`
+        # lowercases the hostname which is not always desirable (e.g.
in S3): + # https://github.com/dask/dask/issues/1417 + options["host"] = parsed_path.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0] + + if protocol in ("s3", "s3a", "gcs", "gs"): + options["path"] = options["host"] + options["path"] + else: + options["host"] = options["host"] + if parsed_path.port: + options["port"] = parsed_path.port + if parsed_path.username: + options["username"] = parsed_path.username + if parsed_path.password: + options["password"] = parsed_path.password + + if parsed_path.query: + options["url_query"] = parsed_path.query + if parsed_path.fragment: + options["url_fragment"] = parsed_path.fragment + + if inherit_storage_options: + update_storage_options(options, inherit_storage_options) + + return options + + +def update_storage_options(options, inherited=None): + if not inherited: + inherited = {} + collisions = set(options) & set(inherited) + if collisions: + for collision in collisions: + if options.get(collision) != inherited.get(collision): + raise KeyError( + "Collision between inferred and specified storage " + "option:\n%s" % collision + ) + options.update(inherited) + + +# Compression extensions registered via fsspec.compression.register_compression +compressions = {} + + +def infer_compression(filename): + """Infer compression, if available, from filename. + + Infer a named compression type, if registered and available, from filename + extension. This includes builtin (gz, bz2, zip) compressions, as well as + optional compressions. See fsspec.compression.register_compression. + """ + extension = os.path.splitext(filename)[-1].strip(".").lower() + if extension in compressions: + return compressions[extension] + + +def build_name_function(max_int): + """Returns a function that receives a single integer + and returns it as a string padded by enough zero characters + to align with maximum possible integer + + >>> name_f = build_name_function(57) + + >>> name_f(7) + '07' + >>> name_f(31) + '31' + >>> build_name_function(1000)(42) + '0042' + >>> build_name_function(999)(42) + '042' + >>> build_name_function(0)(0) + '0' + """ + # handle corner cases max_int is 0 or exact power of 10 + max_int += 1e-8 + + pad_length = int(math.ceil(math.log10(max_int))) + + def name_function(i): + return str(i).zfill(pad_length) + + return name_function + + +def seek_delimiter(file, delimiter, blocksize): + r"""Seek current file to file start, file end, or byte after delimiter seq. + + Seeks file to next chunk delimiter, where chunks are defined on file start, + a delimiting sequence, and file end. Use file.tell() to see location afterwards. + Note that file start is a valid split, so must be at offset > 0 to seek for + delimiter. + + Parameters + ---------- + file: a file + delimiter: bytes + a delimiter like ``b'\n'`` or message sentinel, matching file .read() type + blocksize: int + Number of bytes to read from the file at once. + + + Returns + ------- + Returns True if a delimiter was found, False if at file start or end. + + """ + + if file.tell() == 0: + # beginning-of-file, return without seek + return False + + # Interface is for binary IO, with delimiter as bytes, but initialize last + # with result of file.read to preserve compatibility with text IO. 
+ last = None + while True: + current = file.read(blocksize) + if not current: + # end-of-file without delimiter + return False + full = last + current if last else current + try: + if delimiter in full: + i = full.index(delimiter) + file.seek(file.tell() - (len(full) - i) + len(delimiter)) + return True + elif len(current) < blocksize: + # end-of-file without delimiter + return False + except (OSError, ValueError): + pass + last = full[-len(delimiter) :] + + +def read_block(f, offset, length, delimiter=None, split_before=False): + """Read a block of bytes from a file + + Parameters + ---------- + f: File + Open file + offset: int + Byte offset to start read + length: int + Number of bytes to read, read through end of file if None + delimiter: bytes (optional) + Ensure reading starts and stops at delimiter bytestring + split_before: bool (optional) + Start/stop read *before* delimiter bytestring. + + + If using the ``delimiter=`` keyword argument we ensure that the read + starts and stops at delimiter boundaries that follow the locations + ``offset`` and ``offset + length``. If ``offset`` is zero then we + start at zero, regardless of delimiter. The bytestring returned WILL + include the terminating delimiter string. + + Examples + -------- + + >>> from io import BytesIO # doctest: +SKIP + >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP + >>> read_block(f, 0, 13) # doctest: +SKIP + b'Alice, 100\\nBo' + + >>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP + b'Alice, 100\\nBob, 200\\n' + + >>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP + b'Bob, 200\\nCharlie, 300' + """ + if delimiter: + f.seek(offset) + found_start_delim = seek_delimiter(f, delimiter, 2**16) + if length is None: + return f.read() + start = f.tell() + length -= start - offset + + f.seek(start + length) + found_end_delim = seek_delimiter(f, delimiter, 2**16) + end = f.tell() + + # Adjust split location to before delimiter iff seek found the + # delimiter sequence, not start or end of file. + if found_start_delim and split_before: + start -= len(delimiter) + + if found_end_delim and split_before: + end -= len(delimiter) + + offset = start + length = end - start + + f.seek(offset) + b = f.read(length) + return b + + +def tokenize(*args, **kwargs): + """Deterministic token + + (modified from dask.base) + + >>> tokenize([1, 2, '3']) + '9d71491b50023b06fc76928e6eddb952' + + >>> tokenize('Hello') == tokenize('Hello') + True + """ + if kwargs: + args += (kwargs,) + try: + return md5(str(args).encode()).hexdigest() + except ValueError: + # FIPS systems: https://github.com/fsspec/filesystem_spec/issues/380 + return md5(str(args).encode(), usedforsecurity=False).hexdigest() + + +def stringify_path(filepath): + """Attempt to convert a path-like object to a string. + + Parameters + ---------- + filepath: object to be converted + + Returns + ------- + filepath_str: maybe a string version of the object + + Notes + ----- + Objects supporting the fspath protocol are coerced according to its + __fspath__ method. + + For backwards compatibility with older Python version, pathlib.Path + objects are specially coerced. + + Any other object is passed through unchanged, which includes bytes, + strings, buffers, or anything else that's not even path-like. 
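+
+    Examples
+    --------
+    A sketch of the coercion behavior:
+
+    >>> import pathlib
+    >>> stringify_path(pathlib.Path("/tmp/x"))  # doctest: +SKIP
+    '/tmp/x'
+    >>> stringify_path(42)  # non-path objects pass through  # doctest: +SKIP
+    42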
+ """ + if isinstance(filepath, str): + return filepath + elif hasattr(filepath, "__fspath__"): + return filepath.__fspath__() + elif isinstance(filepath, pathlib.Path): + return str(filepath) + elif hasattr(filepath, "path"): + return filepath.path + else: + return filepath + + +def make_instance(cls, args, kwargs): + inst = cls(*args, **kwargs) + inst._determine_worker() + return inst + + +def common_prefix(paths): + """For a list of paths, find the shortest prefix common to all""" + parts = [p.split("/") for p in paths] + lmax = min(len(p) for p in parts) + end = 0 + for i in range(lmax): + end = all(p[i] == parts[0][i] for p in parts) + if not end: + break + i += end + return "/".join(parts[0][:i]) + + +def other_paths(paths, path2, is_dir=None, exists=False): + """In bulk file operations, construct a new file tree from a list of files + + Parameters + ---------- + paths: list of str + The input file tree + path2: str or list of str + Root to construct the new list in. If this is already a list of str, we just + assert it has the right number of elements. + is_dir: bool (optional) + For the special case where the input in one element, whether to regard the value + as the target path, or as a directory to put a file path within. If None, a + directory is inferred if the path ends in '/' + exists: bool (optional) + For a str destination, it is already exists (and is a dir), files should + end up inside. + + Returns + ------- + list of str + """ + if isinstance(path2, str): + is_dir = is_dir or path2.endswith("/") + path2 = path2.rstrip("/") + if len(paths) > 1: + cp = common_prefix(paths) + if exists: + cp = cp.rsplit("/", 1)[0] + path2 = [p.replace(cp, path2, 1) for p in paths] + else: + if is_dir: + path2 = [path2.rstrip("/") + "/" + paths[0].rsplit("/")[-1]] + else: + path2 = [path2] + else: + assert len(paths) == len(path2) + return path2 + + +def is_exception(obj): + return isinstance(obj, BaseException) + + +def isfilelike(f): + for attr in ["read", "close", "tell"]: + if not hasattr(f, attr): + return False + return True + + +def get_protocol(url): + parts = re.split(r"(\:\:|\://)", url, 1) + if len(parts) > 1: + return parts[0] + return "file" + + +def can_be_local(path): + """Can the given URL be used with open_local?""" + from fsspec import get_filesystem_class + + try: + return getattr(get_filesystem_class(get_protocol(path)), "local_file", False) + except (ValueError, ImportError): + # not in registry or import failed + return False + + +def get_package_version_without_import(name): + """For given package name, try to find the version without importing it + + Import and package.__version__ is still the backup here, so an import + *might* happen. + + Returns either the version string, or None if the package + or the version was not readily found. 
+ """ + if name in sys.modules: + mod = sys.modules[name] + if hasattr(mod, "__version__"): + return mod.__version__ + if sys.version_info >= (3, 8): + try: + import importlib.metadata + + return importlib.metadata.distribution(name).version + except: # noqa: E722 + pass + else: + try: + import importlib_metadata + + return importlib_metadata.distribution(name).version + except: # noqa: E722 + pass + try: + import importlib + + mod = importlib.import_module(name) + return mod.__version__ + except (ImportError, AttributeError): + return None + + +def setup_logging(logger=None, logger_name=None, level="DEBUG", clear=True): + if logger is None and logger_name is None: + raise ValueError("Provide either logger object or logger name") + logger = logger or logging.getLogger(logger_name) + handle = logging.StreamHandler() + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s -- %(message)s" + ) + handle.setFormatter(formatter) + if clear: + logger.handlers.clear() + logger.addHandler(handle) + logger.setLevel(level) + return logger + + +def _unstrip_protocol(name, fs): + return fs.unstrip_protocol(name) + + +def mirror_from(origin_name, methods): + """Mirror attributes and methods from the given + origin_name attribute of the instance to the + decorated class""" + + def origin_getter(method, self): + origin = getattr(self, origin_name) + return getattr(origin, method) + + def wrapper(cls): + for method in methods: + wrapped_method = partial(origin_getter, method) + setattr(cls, method, property(wrapped_method)) + return cls + + return wrapper + + +@contextmanager +def nullcontext(obj): + yield obj + + +def merge_offset_ranges(paths, starts, ends, max_gap=0, max_block=None, sort=True): + """Merge adjacent byte-offset ranges when the inter-range + gap is <= `max_gap`, and when the merged byte range does not + exceed `max_block` (if specified). By default, this function + will re-order the input paths and byte ranges to ensure sorted + order. If the user can guarantee that the inputs are already + sorted, passing `sort=False` will skip the re-ordering. + """ + + # Check input + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, list): + starts = [starts] * len(paths) + if not isinstance(ends, list): + ends = [starts] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + + # Early Return + if len(starts) <= 1: + return paths, starts, ends + + # Sort by paths and then ranges if `sort=True` + if sort: + paths, starts, ends = [list(v) for v in zip(*sorted(zip(paths, starts, ends)))] + + if paths: + # Loop through the coupled `paths`, `starts`, and + # `ends`, and merge adjacent blocks when appropriate + new_paths = paths[:1] + new_starts = starts[:1] + new_ends = ends[:1] + for i in range(1, len(paths)): + if ( + paths[i] != paths[i - 1] + or ((starts[i] - new_ends[-1]) > max_gap) + or ((max_block is not None and (ends[i] - new_starts[-1]) > max_block)) + ): + # Cannot merge with previous block. + # Add new `paths`, `starts`, and `ends` elements + new_paths.append(paths[i]) + new_starts.append(starts[i]) + new_ends.append(ends[i]) + else: + # Merge with previous block by updating the + # last element of `ends` + new_ends[-1] = ends[i] + return new_paths, new_starts, new_ends + + # `paths` is empty. 
Just return input lists + return paths, starts, ends + + +class IOWrapper(IO): + """Wrapper for a file-like object that can be used in situations where we might + want to, e.g., monkey-patch the close method but can't. + (cf https://github.com/fsspec/filesystem_spec/issues/725) + """ + + def __init__(self, fp: IO, closer: Callable[[], None]): + self.fp = fp + self.closer = closer + + def close(self) -> None: + self.fp.close() + + def fileno(self) -> int: + return self.fp.fileno() + + def flush(self) -> None: + self.fp.flush() + + def isatty(self) -> bool: + return self.fp.isatty() + + def read(self, n: int = ...) -> AnyStr: + return self.fp.read(n) + + def readable(self) -> bool: + return self.fp.readable() + + def readline(self, limit: int = ...) -> AnyStr: + return self.fp.readline(limit) + + def readlines(self, hint: int = ...) -> List[AnyStr]: + return self.fp.readlines(hint) + + def seek(self, offset: int, whence: int = ...) -> int: + return self.fp.seek(offset, whence) + + def seekable(self) -> bool: + return self.fp.seekable() + + def tell(self) -> int: + return self.fp.tell() + + def truncate(self, size: Optional[int] = ...) -> int: + return self.fp.truncate(size) + + def writable(self) -> bool: + return self.fp.writable() + + def write(self, s: AnyStr) -> int: + return self.fp.write(s) + + def writelines(self, lines: Iterable[AnyStr]) -> None: + self.fp.writelines(lines) + + def __next__(self) -> AnyStr: + return next(self.fp) + + def __iter__(self) -> Iterator[AnyStr]: + return iter(self.fp) + + def __enter__(self) -> IO[AnyStr]: + return self.fp.__enter__() + + def __exit__( + self, + t: Optional[Type[BaseException]], + value: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> Optional[bool]: + return self.fp.__exit__(t, value, traceback) + + # forward anything else too + def __getattr__(self, name): + return getattr(self.fp, name) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/LICENSE.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..4c904dba8fe259531dcb46bd5723d05a19de6bff --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2013-2019 Python Charmers Pty Ltd, Australia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..4ba0cbcc4a59b83c293477635e1f3efd4933e26a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/METADATA @@ -0,0 +1,107 @@ +Metadata-Version: 2.1 +Name: future +Version: 0.18.2 +Summary: Clean single-source support for Python 3 and 2 +Home-page: https://python-future.org +Author: Ed Schofield +Author-email: ed@pythoncharmers.com +License: MIT +Keywords: future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2 +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: License :: OSI Approved +Classifier: License :: OSI Approved :: MIT License +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.* +License-File: LICENSE.txt + + +future: Easy, safe support for Python 2/3 compatibility +======================================================= + +``future`` is the missing compatibility layer between Python 2 and Python +3. It allows you to use a single, clean Python 3.x-compatible codebase to +support both Python 2 and Python 3 with minimal overhead. + +It is designed to be used as follows:: + + from __future__ import (absolute_import, division, + print_function, unicode_literals) + from builtins import ( + bytes, dict, int, list, object, range, str, + ascii, chr, hex, input, next, oct, open, + pow, round, super, + filter, map, zip) + +followed by predominantly standard, idiomatic Python 3 code that then runs +similarly on Python 2.6/2.7 and Python 3.3+. + +The imports have no effect on Python 3. On Python 2, they shadow the +corresponding builtins, which normally have different semantics on Python 3 +versus 2, to provide their Python 3 semantics. + + +Standard library reorganization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``future`` supports the standard library reorganization (PEP 3108) through the +following Py3 interfaces: + + >>> # Top-level packages with Py3 names provided on Py2: + >>> import html.parser + >>> import queue + >>> import tkinter.dialog + >>> import xmlrpc.client + >>> # etc. + + >>> # Aliases provided for extensions to existing Py2 module names: + >>> from future.standard_library import install_aliases + >>> install_aliases() + + >>> from collections import Counter, OrderedDict # backported to Py2.6 + >>> from collections import UserDict, UserList, UserString + >>> import urllib.request + >>> from itertools import filterfalse, zip_longest + >>> from subprocess import getoutput, getstatusoutput + + +Automatic conversion +-------------------- + +An included script called `futurize +`_ aids in converting +code (from either Python 2 or Python 3) to code compatible with both +platforms. 
It is similar to ``python-modernize`` but goes further in +providing Python 3 compatibility through the use of the backported types +and builtin functions in ``future``. + + +Documentation +------------- + +See: http://python-future.org + + +Credits +------- + +:Author: Ed Schofield, Jordan M. Adler, et al +:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte + Ltd, Singapore. http://pythoncharmers.com +:Others: See docs/credits.rst or http://python-future.org/credits.html + + +Licensing +--------- +Copyright 2013-2019 Python Charmers Pty Ltd, Australia. +The software is distributed under an MIT licence. See LICENSE.txt. + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..bafc16469824b0ae5734402c8603144b04596b44 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/RECORD @@ -0,0 +1,416 @@ +../../../bin/futurize,sha256=xKA_N8JYnXCyzVBfTUkz9pU3eXH5d8c16svN3USrvM4,233 +../../../bin/pasteurize,sha256=GLIzOFPYAgHM9EHgTergrPuJuATuCzdvhzyepiNMEz8,235 +future-0.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +future-0.18.2.dist-info/LICENSE.txt,sha256=kW5WE5LUhHG5wjQ39W4mUvMgyzsRnOqhYu30EBb3Rrk,1083 +future-0.18.2.dist-info/METADATA,sha256=TpD2d2mu5U0tn4-xlVPIWgr_ND-bC8Bp0OSqjDXk2rw,3709 +future-0.18.2.dist-info/RECORD,, +future-0.18.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future-0.18.2.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +future-0.18.2.dist-info/entry_points.txt,sha256=U9LtP60KSNXoj58mzV5TbotBF371gTWzrKrzJIH80Kw,88 +future-0.18.2.dist-info/top_level.txt,sha256=DT0C3az2gb-uJaj-fs0h4WwHYlJVDp0EvLdud1y5Zyw,38 +future/__init__.py,sha256=TsDq1XoGk6Jfach_rEhwAi07zR5OKYZ6hhUlG5Bj6Ag,2991 +future/__pycache__/__init__.cpython-38.pyc,, +future/backports/__init__.py,sha256=5QXvQ_jc5Xx6p4dSaHnZXPZazBEunKDKhbUjxZ0XD1I,530 +future/backports/__pycache__/__init__.cpython-38.pyc,, +future/backports/__pycache__/_markupbase.cpython-38.pyc,, +future/backports/__pycache__/datetime.cpython-38.pyc,, +future/backports/__pycache__/misc.cpython-38.pyc,, +future/backports/__pycache__/socket.cpython-38.pyc,, +future/backports/__pycache__/socketserver.cpython-38.pyc,, +future/backports/__pycache__/total_ordering.cpython-38.pyc,, +future/backports/_markupbase.py,sha256=MDPTCykLq4J7Aea3PvYotATEE0CG4R_SjlxfJaLXTJM,16215 +future/backports/datetime.py,sha256=I214Vu0cRY8mi8J5aIcsAyQJnWmOKXeLV-QTWSn7VQU,75552 +future/backports/email/__init__.py,sha256=eH3AJr3FkuBy_D6yS1V2K76Q2CQ93y2zmAMWmn8FbHI,2269 +future/backports/email/__pycache__/__init__.cpython-38.pyc,, +future/backports/email/__pycache__/_encoded_words.cpython-38.pyc,, +future/backports/email/__pycache__/_header_value_parser.cpython-38.pyc,, +future/backports/email/__pycache__/_parseaddr.cpython-38.pyc,, +future/backports/email/__pycache__/_policybase.cpython-38.pyc,, +future/backports/email/__pycache__/base64mime.cpython-38.pyc,, +future/backports/email/__pycache__/charset.cpython-38.pyc,, +future/backports/email/__pycache__/encoders.cpython-38.pyc,, +future/backports/email/__pycache__/errors.cpython-38.pyc,, +future/backports/email/__pycache__/feedparser.cpython-38.pyc,, +future/backports/email/__pycache__/generator.cpython-38.pyc,, 
+future/backports/email/__pycache__/header.cpython-38.pyc,, +future/backports/email/__pycache__/headerregistry.cpython-38.pyc,, +future/backports/email/__pycache__/iterators.cpython-38.pyc,, +future/backports/email/__pycache__/message.cpython-38.pyc,, +future/backports/email/__pycache__/parser.cpython-38.pyc,, +future/backports/email/__pycache__/policy.cpython-38.pyc,, +future/backports/email/__pycache__/quoprimime.cpython-38.pyc,, +future/backports/email/__pycache__/utils.cpython-38.pyc,, +future/backports/email/_encoded_words.py,sha256=m1vTRfxAQdg4VyWO7PF-1ih1mmq97V-BPyHHkuEwSME,8443 +future/backports/email/_header_value_parser.py,sha256=cj_1ce1voLn8H98r9cKqiSLgfFSxCv3_UL3sSvjqgjk,104692 +future/backports/email/_parseaddr.py,sha256=KewEnos0YDM-SYX503z7E1MmVbG5VRaKjxjcl0Ipjbs,17389 +future/backports/email/_policybase.py,sha256=2lJD9xouiz4uHvWGQ6j1nwlwWVQGwwzpy5JZoeQqhUc,14647 +future/backports/email/base64mime.py,sha256=sey6iJA9pHIOdFgoV1p7QAwYVjt8CEkDhITt304-nyI,3729 +future/backports/email/charset.py,sha256=CfE4iV2zAq6MQC0CHXHLnwTNW71zmhNITbzOcfxE4vY,17439 +future/backports/email/encoders.py,sha256=Nn4Pcx1rOdRgoSIzB6T5RWHl5zxClbf32wgE6D0tUt8,2800 +future/backports/email/errors.py,sha256=tRX8PP5g7mk2bAxL1jTCYrbfhD2gPZFNrh4_GJRM8OQ,3680 +future/backports/email/feedparser.py,sha256=bvmhb4cdY-ipextPK2K2sDgMsNvTspmuQfYyCxc4zSc,22736 +future/backports/email/generator.py,sha256=lpaLhZHneguvZ2QgRu7Figkjb7zmY28AGhj9iZTdI7s,19520 +future/backports/email/header.py,sha256=uBHbNKO-yx5I9KBflernJpyy3fX4gImCB1QE7ICApLs,24448 +future/backports/email/headerregistry.py,sha256=ZPbvLKXD0NMLSU4jXlVHfGyGcLMrFm-GQVURu_XHj88,20637 +future/backports/email/iterators.py,sha256=kMRYFGy3SVVpo7HG7JJr2ZAlOoaX6CVPzKYwDSvLfV0,2348 +future/backports/email/message.py,sha256=I6WW5cZDza7uwLOGJSvsDhGZC9K_Q570Lk2gt_vDUXM,35237 +future/backports/email/mime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/backports/email/mime/__pycache__/__init__.cpython-38.pyc,, +future/backports/email/mime/__pycache__/application.cpython-38.pyc,, +future/backports/email/mime/__pycache__/audio.cpython-38.pyc,, +future/backports/email/mime/__pycache__/base.cpython-38.pyc,, +future/backports/email/mime/__pycache__/image.cpython-38.pyc,, +future/backports/email/mime/__pycache__/message.cpython-38.pyc,, +future/backports/email/mime/__pycache__/multipart.cpython-38.pyc,, +future/backports/email/mime/__pycache__/nonmultipart.cpython-38.pyc,, +future/backports/email/mime/__pycache__/text.cpython-38.pyc,, +future/backports/email/mime/application.py,sha256=m-5a4mSxu2E32XAImnp9x9eMVX5Vme2iNgn2dMMNyss,1401 +future/backports/email/mime/audio.py,sha256=2ognalFRadcsUYQYMUZbjv5i1xJbFhQN643doMuI7M4,2815 +future/backports/email/mime/base.py,sha256=wV3ClQyMsOqmkXSXbk_wd_zPoPTvBx8kAIzq3rdM4lE,875 +future/backports/email/mime/image.py,sha256=DpQk1sB-IMmO43AF4uadsXyf_y5TdEzJLfyhqR48bIw,1907 +future/backports/email/mime/message.py,sha256=pFsMhXW07aRjsLq1peO847PApWFAl28-Z2Z7BP1Dn74,1429 +future/backports/email/mime/multipart.py,sha256=j4Lf_sJmuwTbfgdQ6R35_t1_ha2DynJBJDvpjwbNObE,1699 +future/backports/email/mime/nonmultipart.py,sha256=Ciba1Z8d2yLDDpxgDJuk3Bb-TqcpE9HCd8KfbW5vgl4,832 +future/backports/email/mime/text.py,sha256=zV98BjoR4S_nX8c47x43LnsnifeGhIfNGwSAh575bs0,1552 +future/backports/email/parser.py,sha256=-115SC3DHZ6lLijWFTxuOnE-GiM2BOYaUSz-QpmvYSo,5312 +future/backports/email/policy.py,sha256=gpcbhVRXuCohkK6MUqopTs1lv4E4-ZVUO6OVncoGEJE,8823 +future/backports/email/quoprimime.py,sha256=w93W5XgdFpyGaDqDBJrnXF_v_npH5r20WuAxmrAzyQg,10923 
+future/backports/email/utils.py,sha256=vpfN0E8UjNbNw-2NFBQGCo4TNgrghMsqzpEYW5C_fBs,14270 +future/backports/html/__init__.py,sha256=FKwqFtWMCoGNkhU97OPnR1fZSh6etAKfN1FU1KvXcV8,924 +future/backports/html/__pycache__/__init__.cpython-38.pyc,, +future/backports/html/__pycache__/entities.cpython-38.pyc,, +future/backports/html/__pycache__/parser.cpython-38.pyc,, +future/backports/html/entities.py,sha256=kzoRnQyGk_3DgoucHLhL5QL1pglK9nvmxhPIGZFDTnc,75428 +future/backports/html/parser.py,sha256=G2tUObvbHSotNt06JLY-BP1swaZNfDYFd_ENWDjPmRg,19770 +future/backports/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/backports/http/__pycache__/__init__.cpython-38.pyc,, +future/backports/http/__pycache__/client.cpython-38.pyc,, +future/backports/http/__pycache__/cookiejar.cpython-38.pyc,, +future/backports/http/__pycache__/cookies.cpython-38.pyc,, +future/backports/http/__pycache__/server.cpython-38.pyc,, +future/backports/http/client.py,sha256=76EbhEZOtvdHFcU-jrjivoff13oQ9IMbdkZEdf5kQzQ,47602 +future/backports/http/cookiejar.py,sha256=_Vy4BPT-h0ZT0R_utGQAFXzuOAdmU9KedGFffyX9wN4,76559 +future/backports/http/cookies.py,sha256=DsyDUGDEbCXAA9Jq6suswSc76uSZqUu39adDDNj8XGw,21581 +future/backports/http/server.py,sha256=1CaMxgzHf9lYhmTJyE7topgjRIlIn9cnjgw8YEvwJV4,45523 +future/backports/misc.py,sha256=AkbED6BdHKnYCmIAontT4zHKTqdPPfJfn35HIs6LDrg,32682 +future/backports/socket.py,sha256=DH1V6IjKPpJ0tln8bYvxvQ7qnvZG-UoQtMA5yVleHiU,15663 +future/backports/socketserver.py,sha256=Twvyk5FqVnOeiNcbVsyMDPTF1mNlkKfyofG7tKxTdD8,24286 +future/backports/test/__init__.py,sha256=9dXxIZnkI095YfHC-XIaVF6d31GjeY1Ag8TEzcFgepM,264 +future/backports/test/__pycache__/__init__.cpython-38.pyc,, +future/backports/test/__pycache__/pystone.cpython-38.pyc,, +future/backports/test/__pycache__/ssl_servers.cpython-38.pyc,, +future/backports/test/__pycache__/support.cpython-38.pyc,, +future/backports/test/badcert.pem,sha256=JioQeRZkHH8hGsWJjAF3U1zQvcWqhyzG6IOEJpTY9SE,1928 +future/backports/test/badkey.pem,sha256=gaBK9px_gG7DmrLKxfD6f6i-toAmARBTVfs-YGFRQF0,2162 +future/backports/test/dh512.pem,sha256=dUTsjtLbK-femrorUrTGF8qvLjhTiT_n4Uo5V6u__Gs,402 +future/backports/test/https_svn_python_org_root.pem,sha256=wOB3Onnc62Iu9kEFd8GcHhd_suucYjpJNA3jyfHeJWA,2569 +future/backports/test/keycert.passwd.pem,sha256=ZBfnVLpbBtAOf_2gCdiQ-yrBHmRsNzSf8VC3UpQZIjg,1830 +future/backports/test/keycert.pem,sha256=xPXi5idPcQVbrhgxBqF2TNGm6sSZ2aLVVEt6DWzplL8,1783 +future/backports/test/keycert2.pem,sha256=DB46FEAYv8BWwQJ-5RzC696FxPN7CON-Qsi-R4poJgc,1795 +future/backports/test/nokia.pem,sha256=s00x0uPDSaa5DHJ_CwzlVhg3OVdJ47f4zgqQdd0SAfQ,1923 +future/backports/test/nullbytecert.pem,sha256=NFRYWhmP_qT3jGfVjR6-iaC-EQdhIFjiXtTLN5ZPKnE,5435 +future/backports/test/nullcert.pem,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/backports/test/pystone.py,sha256=fvyoJ_tVovTNaxbJmdJMwr9F6SngY-U4ibULnd_wUqA,7427 +future/backports/test/sha256.pem,sha256=3wB-GQqEc7jq-PYwYAQaPbtTvvr7stk_DVmZxFgehfA,8344 +future/backports/test/ssl_cert.pem,sha256=M607jJNeIeHG9BlTf_jaQkPJI4nOxSJPn-zmEAaW43M,867 +future/backports/test/ssl_key.passwd.pem,sha256=I_WH4sBw9Vs9Z-BvmuXY0aw8tx8avv6rm5UL4S_pP00,963 +future/backports/test/ssl_key.pem,sha256=VKGU-R3UYaZpVTXl7chWl4vEYEDeob69SfvRTQ8aq_4,916 +future/backports/test/ssl_servers.py,sha256=-pd7HMZljuZfFRAbCAiAP_2G04orITJFj-S9ddr6o84,7209 +future/backports/test/support.py,sha256=zJrb-pz-Wu2dZwnNodg1v3w96zVq7ORuN-hOGOHbdA8,70881 
+future/backports/total_ordering.py,sha256=O3M57_IisQ-zW5hW20uxkfk4fTGsr0EF2tAKx3BksQo,1929 +future/backports/urllib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/backports/urllib/__pycache__/__init__.cpython-38.pyc,, +future/backports/urllib/__pycache__/error.cpython-38.pyc,, +future/backports/urllib/__pycache__/parse.cpython-38.pyc,, +future/backports/urllib/__pycache__/request.cpython-38.pyc,, +future/backports/urllib/__pycache__/response.cpython-38.pyc,, +future/backports/urllib/__pycache__/robotparser.cpython-38.pyc,, +future/backports/urllib/error.py,sha256=ktikuK9ag4lS4f8Z0k5p1F11qF40N2AiOtjbXiF97ew,2715 +future/backports/urllib/parse.py,sha256=67avrYqV1UK7i_22goRUrvJ8SffzjRdTja9wzq_ynXY,35792 +future/backports/urllib/request.py,sha256=aR9ZMzfhV1C2Qk3wFsGvkwxqtdPTdsJVGRt5DUCwgJ8,96276 +future/backports/urllib/response.py,sha256=ooQyswwbb-9N6IVi1Kwjss1aR-Kvm8ZNezoyVEonp8c,3180 +future/backports/urllib/robotparser.py,sha256=pnAGTbKhdbCq_9yMZp7m8hj5q_NJpyQX6oQIZuYcnkw,6865 +future/backports/xmlrpc/__init__.py,sha256=h61ciVTdVvu8oEUXv4dHf_Tc5XUXDH3RKB1-8fQhSsg,38 +future/backports/xmlrpc/__pycache__/__init__.cpython-38.pyc,, +future/backports/xmlrpc/__pycache__/client.cpython-38.pyc,, +future/backports/xmlrpc/__pycache__/server.cpython-38.pyc,, +future/backports/xmlrpc/client.py,sha256=6a6Pvx_RVC9gIHDkFOVdREeGaZckOOiWd7T6GyzU3qU,48133 +future/backports/xmlrpc/server.py,sha256=W_RW5hgYbNV2LGbnvngzm7akacRdK-XFY-Cy2HL-qsY,37285 +future/builtins/__init__.py,sha256=jSdOucWfCsfkfTR8Jd4-Ls-YQpJ0AnzUomBxgwuoxNs,1687 +future/builtins/__pycache__/__init__.cpython-38.pyc,, +future/builtins/__pycache__/disabled.cpython-38.pyc,, +future/builtins/__pycache__/iterators.cpython-38.pyc,, +future/builtins/__pycache__/misc.cpython-38.pyc,, +future/builtins/__pycache__/new_min_max.cpython-38.pyc,, +future/builtins/__pycache__/newnext.cpython-38.pyc,, +future/builtins/__pycache__/newround.cpython-38.pyc,, +future/builtins/__pycache__/newsuper.cpython-38.pyc,, +future/builtins/disabled.py,sha256=Ysq74bsmwntpq7dzkwTAD7IHKrkXy66vJlPshVwgVBI,2109 +future/builtins/iterators.py,sha256=l1Zawm2x82oqOuGGtCZRE76Ej98sMlHQwu9fZLK5RrA,1396 +future/builtins/misc.py,sha256=hctlKKWUyN0Eoodxg4ySQHEqARTukOLR4L5K5c6PW9k,4550 +future/builtins/new_min_max.py,sha256=7qQ4iiG4GDgRzjPzzzmg9pdby35Mtt6xNOOsyqHnIGY,1757 +future/builtins/newnext.py,sha256=oxXB8baXqJv29YG40aCS9UXk9zObyoOjya8BJ7NdBJM,2009 +future/builtins/newround.py,sha256=l2EXPAFU3fAsZigJxUH6x66B7jhNaB076-L5FR617R8,3181 +future/builtins/newsuper.py,sha256=LmiUQ_f6NXDIz6v6sDPkoTWl-2Zccy7PpZfQKYtscac,4146 +future/moves/__init__.py,sha256=MsAW69Xp_fqUo4xODufcKM6AZf-ozHaz44WPZdsDFJA,220 +future/moves/__pycache__/__init__.cpython-38.pyc,, +future/moves/__pycache__/_dummy_thread.cpython-38.pyc,, +future/moves/__pycache__/_markupbase.cpython-38.pyc,, +future/moves/__pycache__/_thread.cpython-38.pyc,, +future/moves/__pycache__/builtins.cpython-38.pyc,, +future/moves/__pycache__/collections.cpython-38.pyc,, +future/moves/__pycache__/configparser.cpython-38.pyc,, +future/moves/__pycache__/copyreg.cpython-38.pyc,, +future/moves/__pycache__/itertools.cpython-38.pyc,, +future/moves/__pycache__/pickle.cpython-38.pyc,, +future/moves/__pycache__/queue.cpython-38.pyc,, +future/moves/__pycache__/reprlib.cpython-38.pyc,, +future/moves/__pycache__/socketserver.cpython-38.pyc,, +future/moves/__pycache__/subprocess.cpython-38.pyc,, +future/moves/__pycache__/sys.cpython-38.pyc,, +future/moves/__pycache__/winreg.cpython-38.pyc,, 
+future/moves/_dummy_thread.py,sha256=c8ZRUd8ffvyvGKGGgve5NKc8VdtAWquu8-4FnO2EdvA,175 +future/moves/_markupbase.py,sha256=W9wh_Gu3jDAMIhVBV1ZnCkJwYLHRk_v_su_HLALBkZQ,171 +future/moves/_thread.py,sha256=rwY7L4BZMFPlrp_i6T2Un4_iKYwnrXJ-yV6FJZN8YDo,163 +future/moves/builtins.py,sha256=4sjjKiylecJeL9da_RaBZjdymX2jtMs84oA9lCqb4Ug,281 +future/moves/collections.py,sha256=OKQ-TfUgms_2bnZRn4hrclLDoiN2i-HSWcjs3BC2iY8,417 +future/moves/configparser.py,sha256=TNy226uCbljjU-DjAVo7j7Effbj5zxXvDh0SdXehbzk,146 +future/moves/copyreg.py,sha256=Y3UjLXIMSOxZggXtvZucE9yv4tkKZtVan45z8eix4sU,438 +future/moves/dbm/__init__.py,sha256=_VkvQHC2UcIgZFPRroiX_P0Fs7HNqS_69flR0-oq2B8,488 +future/moves/dbm/__pycache__/__init__.cpython-38.pyc,, +future/moves/dbm/__pycache__/dumb.cpython-38.pyc,, +future/moves/dbm/__pycache__/gnu.cpython-38.pyc,, +future/moves/dbm/__pycache__/ndbm.cpython-38.pyc,, +future/moves/dbm/dumb.py,sha256=HKdjjtO3EyP9EKi1Hgxh_eUU6yCQ0fBX9NN3n-zb8JE,166 +future/moves/dbm/gnu.py,sha256=XoCSEpZ2QaOgo2h1m80GW7NUgj_b93BKtbcuwgtnaKo,162 +future/moves/dbm/ndbm.py,sha256=OFnreyo_1YHDBl5YUm9gCzKlN1MHgWbfSQAZVls2jaM,162 +future/moves/html/__init__.py,sha256=BSUFSHxXf2kGvHozlnrB1nn6bPE6p4PpN3DwA_Z5geo,1016 +future/moves/html/__pycache__/__init__.cpython-38.pyc,, +future/moves/html/__pycache__/entities.cpython-38.pyc,, +future/moves/html/__pycache__/parser.cpython-38.pyc,, +future/moves/html/entities.py,sha256=lVvchdjK_RzRj759eg4RMvGWHfgBbj0tKGOoZ8dbRyY,177 +future/moves/html/parser.py,sha256=V2XpHLKLCxQum3N9xlO3IUccAD7BIykZMqdEcWET3vY,167 +future/moves/http/__init__.py,sha256=Mx1v_Tcks4udHCtDM8q2xnYUiQ01gD7EpPyeQwsP3-Q,71 +future/moves/http/__pycache__/__init__.cpython-38.pyc,, +future/moves/http/__pycache__/client.cpython-38.pyc,, +future/moves/http/__pycache__/cookiejar.cpython-38.pyc,, +future/moves/http/__pycache__/cookies.cpython-38.pyc,, +future/moves/http/__pycache__/server.cpython-38.pyc,, +future/moves/http/client.py,sha256=hqEBq7GDXZidd1AscKnSyjSoMcuj8rERqGTmD7VheDQ,165 +future/moves/http/cookiejar.py,sha256=Frr9ZZCg-145ymy0VGpiPJhvBEpJtVqRBYPaKhgT1Z4,173 +future/moves/http/cookies.py,sha256=PPrHa1_oDbu3D_BhJGc6PvMgY1KoxyYq1jqeJwEcMvE,233 +future/moves/http/server.py,sha256=8YQlSCShjAsB5rr5foVvZgp3IzwYFvTmGZCHhBSDtaI,606 +future/moves/itertools.py,sha256=PVxFHRlBQl9ElS0cuGFPcUtj53eHX7Z1DmggzGfgQ6c,158 +future/moves/pickle.py,sha256=r8j9skzfE8ZCeHyh_OB-WucOkRTIHN7zpRM7l7V3qS4,229 +future/moves/queue.py,sha256=uxvLCChF-zxWWgrY1a_wxt8rp2jILdwO4PrnkBW6VTE,160 +future/moves/reprlib.py,sha256=Nt5sUgMQ3jeVIukqSHOvB0UIsl6Y5t-mmT_13mpZmiY,161 +future/moves/socketserver.py,sha256=v8ZLurDxHOgsubYm1iefjlpnnJQcx2VuRUGt9FCJB9k,174 +future/moves/subprocess.py,sha256=oqRSMfFZkxM4MXkt3oD5N6eBwmmJ6rQ9KPhvSQKT_hM,251 +future/moves/sys.py,sha256=HOMRX4Loim75FMbWawd3oEwuGNJR-ClMREEFkVpBsRs,132 +future/moves/test/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110 +future/moves/test/__pycache__/__init__.cpython-38.pyc,, +future/moves/test/__pycache__/support.cpython-38.pyc,, +future/moves/test/support.py,sha256=6zGgTTXcERyBJIQ04-X-sAe781tVgLVHp3HzmQPy52g,259 +future/moves/tkinter/__init__.py,sha256=jV9vDx3wRl0bsoclU8oSe-5SqHQ3YpCbStmqtXnq1p4,620 +future/moves/tkinter/__pycache__/__init__.cpython-38.pyc,, +future/moves/tkinter/__pycache__/colorchooser.cpython-38.pyc,, +future/moves/tkinter/__pycache__/commondialog.cpython-38.pyc,, +future/moves/tkinter/__pycache__/constants.cpython-38.pyc,, +future/moves/tkinter/__pycache__/dialog.cpython-38.pyc,, +future/moves/tkinter/__pycache__/dnd.cpython-38.pyc,, 
+future/moves/tkinter/__pycache__/filedialog.cpython-38.pyc,, +future/moves/tkinter/__pycache__/font.cpython-38.pyc,, +future/moves/tkinter/__pycache__/messagebox.cpython-38.pyc,, +future/moves/tkinter/__pycache__/scrolledtext.cpython-38.pyc,, +future/moves/tkinter/__pycache__/simpledialog.cpython-38.pyc,, +future/moves/tkinter/__pycache__/tix.cpython-38.pyc,, +future/moves/tkinter/__pycache__/ttk.cpython-38.pyc,, +future/moves/tkinter/colorchooser.py,sha256=kprlmpRtvDbW5Gq43H1mi2KmNJ2kuzLQOba0a5EwDkU,333 +future/moves/tkinter/commondialog.py,sha256=mdUbq1IZqOGaSA7_8R367IukDCsMfzXiVHrTQQpp7Z0,333 +future/moves/tkinter/constants.py,sha256=0qRUrZLRPdVxueABL9KTzzEWEsk6xM1rOjxK6OHxXtA,324 +future/moves/tkinter/dialog.py,sha256=ksp-zvs-_A90P9RNHS8S27f1k8f48zG2Bel2jwZV5y0,311 +future/moves/tkinter/dnd.py,sha256=C_Ah0Urnyf2XKE5u-oP6mWi16RzMSXgMA1uhBSAwKY8,306 +future/moves/tkinter/filedialog.py,sha256=RSJFDGOP2AJ4T0ZscJ2hyF9ssOWp9t_S_DtnOmT-WZ8,323 +future/moves/tkinter/font.py,sha256=TXarflhJRxqepaRNSDw6JFUVGz5P1T1C4_uF9VRqj3w,309 +future/moves/tkinter/messagebox.py,sha256=WJt4t83kLmr_UnpCWFuLoyazZr3wAUOEl6ADn3osoEA,327 +future/moves/tkinter/scrolledtext.py,sha256=DRzN8aBAlDBUo1B2KDHzdpRSzXBfH4rOOz0iuHXbQcg,329 +future/moves/tkinter/simpledialog.py,sha256=6MhuVhZCJV4XfPpPSUWKlDLLGEi0Y2ZlGQ9TbsmJFL0,329 +future/moves/tkinter/tix.py,sha256=aNeOfbWSGmcN69UmEGf4tJ-QIxLT6SU5ynzm1iWgepA,302 +future/moves/tkinter/ttk.py,sha256=rRrJpDjcP2gjQNukECu4F026P-CkW-3Ca2tN6Oia-Fw,302 +future/moves/urllib/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110 +future/moves/urllib/__pycache__/__init__.cpython-38.pyc,, +future/moves/urllib/__pycache__/error.cpython-38.pyc,, +future/moves/urllib/__pycache__/parse.cpython-38.pyc,, +future/moves/urllib/__pycache__/request.cpython-38.pyc,, +future/moves/urllib/__pycache__/response.cpython-38.pyc,, +future/moves/urllib/__pycache__/robotparser.cpython-38.pyc,, +future/moves/urllib/error.py,sha256=gfrKzv-6W5OjzNIfjvJaQkxABRLym2KwjfKFXSdDB60,479 +future/moves/urllib/parse.py,sha256=xLLUMIIB5MreCdYzRZ5zIRWrhTRCoMO8RZEH4WPFQDY,1045 +future/moves/urllib/request.py,sha256=ttIzq60PwjRyrLQUGdAtfYvs4fziVwvcLe2Kw-hvE0g,3496 +future/moves/urllib/response.py,sha256=ZEZML0FpbB--GIeBFPvSzbtlVJ6EsR4tCI4qB7D8sFQ,342 +future/moves/urllib/robotparser.py,sha256=j24p6dMNzUpGZtT3BQxwRoE-F88iWmBpKgu0tRV61FQ,179 +future/moves/winreg.py,sha256=2zNAG59QI7vFlCj7kqDh0JrAYTpexOnI55PEAIjYhqo,163 +future/moves/xmlrpc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/moves/xmlrpc/__pycache__/__init__.cpython-38.pyc,, +future/moves/xmlrpc/__pycache__/client.cpython-38.pyc,, +future/moves/xmlrpc/__pycache__/server.cpython-38.pyc,, +future/moves/xmlrpc/client.py,sha256=2PfnL5IbKVwdKP7C8B1OUviEtuBObwoH4pAPfvHIvQc,143 +future/moves/xmlrpc/server.py,sha256=ESDXdpUgTKyeFmCDSnJmBp8zONjJklsRJOvy4OtaALc,143 +future/standard_library/__init__.py,sha256=7paz9IsD5qv_tvk5Rre3YrlA2_2aS1FJfI7UlrzAtWY,27743 +future/standard_library/__pycache__/__init__.cpython-38.pyc,, +future/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +future/tests/__pycache__/__init__.cpython-38.pyc,, +future/tests/__pycache__/base.cpython-38.pyc,, +future/tests/base.py,sha256=7LTAKHJgUxOwmffD1kgcErVt2VouKcldPnq4iruqk_k,19956 +future/types/__init__.py,sha256=5fBxWqf_OTQ8jZ7k2TS34rFH14togeR488F4zBHIQ-s,6831 +future/types/__pycache__/__init__.cpython-38.pyc,, +future/types/__pycache__/newbytes.cpython-38.pyc,, +future/types/__pycache__/newdict.cpython-38.pyc,, 
+future/types/__pycache__/newint.cpython-38.pyc,, +future/types/__pycache__/newlist.cpython-38.pyc,, +future/types/__pycache__/newmemoryview.cpython-38.pyc,, +future/types/__pycache__/newobject.cpython-38.pyc,, +future/types/__pycache__/newopen.cpython-38.pyc,, +future/types/__pycache__/newrange.cpython-38.pyc,, +future/types/__pycache__/newstr.cpython-38.pyc,, +future/types/newbytes.py,sha256=D_kNDD9sbNJir2cUxxePiAuw2OW5irxVnu55uHmuK9E,16303 +future/types/newdict.py,sha256=2N7P44cWmWtiDHvlK5ir15mW492gg6uP2n65d5bsDy4,3100 +future/types/newint.py,sha256=hJiv9qUDrjl1xkfzNFNLzafsRMPoFcRFceoivUzVIek,13286 +future/types/newlist.py,sha256=-H5-fXodd-UQgTFnZBJdwE68CrgIL_jViYdv4w7q2rU,2284 +future/types/newmemoryview.py,sha256=LnARgiKqQ2zLwwDZ3owu1atoonPQkOneWMfxJCwB4_o,712 +future/types/newobject.py,sha256=AX_n8GwlDR2IY-xIwZCvu0Olj_Ca2aS57nlTihnFr-I,3358 +future/types/newopen.py,sha256=lcRNHWZ1UjEn_0_XKis1ZA5U6l-4c-CHlC0WX1sY4NI,810 +future/types/newrange.py,sha256=7sgJaRaC4WIUtZ40K-c1d5QWruyaCWGgTVFadKo8qYA,5294 +future/types/newstr.py,sha256=e0brkurI0IK--4ToQEO4Cz1FECZav4CyUGMKxlrcmK4,15758 +future/utils/__init__.py,sha256=wsvXsKx-DXZichQ10Rdml-CWMqS79RNNynmdvfISpCU,21828 +future/utils/__pycache__/__init__.cpython-38.pyc,, +future/utils/__pycache__/surrogateescape.cpython-38.pyc,, +future/utils/surrogateescape.py,sha256=7u4V4XlW83P5YSAJS2f92YUF8vsWthsiTnmAshOJL_M,6097 +libfuturize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31 +libfuturize/__pycache__/__init__.cpython-38.pyc,, +libfuturize/__pycache__/fixer_util.cpython-38.pyc,, +libfuturize/__pycache__/main.cpython-38.pyc,, +libfuturize/fixer_util.py,sha256=Zhms5G97l40pyG1krQM2lCp-TxnocBdJkB2AbkAFnKY,17494 +libfuturize/fixes/__init__.py,sha256=5KEpUnjVsFCCsr_-zrikvJbLf9zslEJnFTH_5pBc33I,5236 +libfuturize/fixes/__pycache__/__init__.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_UserDict.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_absolute_import.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_add__future__imports_except_unicode_literals.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_basestring.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_bytes.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_cmp.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_division.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_division_safe.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_execfile.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_future_builtins.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_future_standard_library.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_future_standard_library_urllib.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_input.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_metaclass.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_next_call.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_object.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_oldstr_wrap.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_order___future__imports.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_print.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_print_with_import.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_raise.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_remove_old__future__imports.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_unicode_keep_u.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_unicode_literals_import.cpython-38.pyc,, +libfuturize/fixes/__pycache__/fix_xrange_with_import.cpython-38.pyc,, 
+libfuturize/fixes/fix_UserDict.py,sha256=jL4jXnGaUQTkG8RKfGXbU_HVTkB3MWZMQwUkqMAjB6I,3840 +libfuturize/fixes/fix_absolute_import.py,sha256=vkrF2FyQR5lSz2WmdqywzkEJVTC0eq4gh_REWBKHh7w,3140 +libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py,sha256=Fr219VAzR8KWXc2_bfiqLl10EgxAWjL6cI3Mowt--VU,662 +libfuturize/fixes/fix_basestring.py,sha256=bHkKuMzhr5FMXwjXlMOjsod4S3rQkVdbzhoWV4-tl3Y,394 +libfuturize/fixes/fix_bytes.py,sha256=AhzOJes6EnPwgzboDjvURANbWKqciG6ZGaYW07PYQK8,685 +libfuturize/fixes/fix_cmp.py,sha256=Blq_Z0IGkYiKS83QzZ5wUgpJyZfQiZoEsWJ1VPyXgFY,701 +libfuturize/fixes/fix_division.py,sha256=gnrAi7stquiVUyi_De1H8q--43iQaSUX0CjnOmQ6O2w,228 +libfuturize/fixes/fix_division_safe.py,sha256=Y_HUfQJAxRClXkcfqWP5SFCsRYZOsLUsNjLXlGOA3cQ,3292 +libfuturize/fixes/fix_execfile.py,sha256=I5AcJ6vPZ7i70TChaq9inxqnZ4C04-yJyfAItGa8E3c,921 +libfuturize/fixes/fix_future_builtins.py,sha256=QBCRpD9XA7tbtfP4wmOF2DXquB4lq-eupkQj-QAxp0s,2027 +libfuturize/fixes/fix_future_standard_library.py,sha256=FVtflFt38efHe_SEX6k3m6IYAtKWjA4rAPZrlCv6yA0,733 +libfuturize/fixes/fix_future_standard_library_urllib.py,sha256=Rf81XcAXA-vwNvrhskf5sLExbR--Wkr5fiUcMYGAKzs,1001 +libfuturize/fixes/fix_input.py,sha256=bhaPNtMrZNbjWIDQCR7Iue5BxBj4rf0RJQ9_jiwvb-s,687 +libfuturize/fixes/fix_metaclass.py,sha256=GLB76wbuyUVciDgW9bgNNOBEnLeS_AR-fKABcPBZk6M,9568 +libfuturize/fixes/fix_next_call.py,sha256=01STG86Av9o5QcpQDJ6UbPhvxt9kKrkatiPeddXRgvA,3158 +libfuturize/fixes/fix_object.py,sha256=qalFIjn0VTWXG5sGOOoCvO65omjX5_9d40SUpwUjBdw,407 +libfuturize/fixes/fix_oldstr_wrap.py,sha256=UCR6Q2l-pVqJSrRTnQAWMlaqBoX7oX1VpG_w6Q0XcyY,1214 +libfuturize/fixes/fix_order___future__imports.py,sha256=ACUCw5NEGWvj6XA9rNj8BYha3ktxLvkM5Ssh5cyV644,829 +libfuturize/fixes/fix_print.py,sha256=92s1w2t9SynA3Y1_85-lexSBbgEWJM6lBrhCxVacfDc,3384 +libfuturize/fixes/fix_print_with_import.py,sha256=hVWn70Q1DPMUiHMyEqgUx-6sM1AylLj78v9pMc4LFw8,735 +libfuturize/fixes/fix_raise.py,sha256=mEXpM9sS6tenMmxayfqM-Kp9gUvaztTY61vFaqyMUuo,3884 +libfuturize/fixes/fix_remove_old__future__imports.py,sha256=j4EC1KEVgXhuQAqhYHnAruUjW6uczPjV_fTCSOLMuAw,851 +libfuturize/fixes/fix_unicode_keep_u.py,sha256=M8fcFxHeFnWVOKoQRpkMsnpd9qmUFubI2oFhO4ZPk7A,779 +libfuturize/fixes/fix_unicode_literals_import.py,sha256=wq-hb-9Yx3Az4ol-ylXZJPEDZ81EaPZeIy5VvpA0CEY,367 +libfuturize/fixes/fix_xrange_with_import.py,sha256=f074qStjMz3OtLjt1bKKZSxQnRbbb7HzEbqHt9wgqdw,479 +libfuturize/main.py,sha256=feICmcv0dzWhutvwz0unnIVxusbSlQZFDaxObkHebs8,13733 +libpasteurize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31 +libpasteurize/__pycache__/__init__.cpython-38.pyc,, +libpasteurize/__pycache__/main.cpython-38.pyc,, +libpasteurize/fixes/__init__.py,sha256=ccdv-2MGjQMbq8XuEZBndHmbzGRrZnabksjXZLUv044,3719 +libpasteurize/fixes/__pycache__/__init__.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/feature_base.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_add_all__future__imports.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_add_all_future_builtins.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_add_future_standard_library_import.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_annotations.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_division.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_features.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_fullargspec.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_future_builtins.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_getcwd.cpython-38.pyc,, 
+libpasteurize/fixes/__pycache__/fix_imports.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_imports2.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_kwargs.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_memoryview.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_metaclass.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_newstyle.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_next.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_printfunction.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_raise.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_raise_.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_throw.cpython-38.pyc,, +libpasteurize/fixes/__pycache__/fix_unpacking.cpython-38.pyc,, +libpasteurize/fixes/feature_base.py,sha256=v7yLjBDBUPeNUc-YHGGlIsJDOQzFAM4Vo0RN5F1JHVU,1723 +libpasteurize/fixes/fix_add_all__future__imports.py,sha256=mHet1LgbHn9GfgCYGNZXKo-rseDWreAvUcAjZwdgeTE,676 +libpasteurize/fixes/fix_add_all_future_builtins.py,sha256=scfkY-Sz5j0yDtLYls2ENOcqEMPVxeDm9gFYYPINPB8,1269 +libpasteurize/fixes/fix_add_future_standard_library_import.py,sha256=thTRbkBzy_SJjZ0bJteTp0sBTx8Wr69xFakH4styf7Y,663 +libpasteurize/fixes/fix_annotations.py,sha256=VT_AorKY9AYWYZUZ17_CeUrJlEA7VGkwVLDQlwD1Bxo,1581 +libpasteurize/fixes/fix_division.py,sha256=_TD_c5KniAYqEm11O7NJF0v2WEhYSNkRGcKG_94ZOas,904 +libpasteurize/fixes/fix_features.py,sha256=NZn0n34_MYZpLNwyP1Tf51hOiN58Rg7A8tA9pK1S8-c,2675 +libpasteurize/fixes/fix_fullargspec.py,sha256=VlZuIU6QNrClmRuvC4mtLICL3yMCi-RcGCnS9fD4b-Q,438 +libpasteurize/fixes/fix_future_builtins.py,sha256=SlCK9I9u05m19Lr1wxlJxF8toZ5yu0yXBeDLxUN9_fw,1450 +libpasteurize/fixes/fix_getcwd.py,sha256=uebvTvFboLqsROFCwdnzoP6ThziM0skz9TDXHoJcFsQ,873 +libpasteurize/fixes/fix_imports.py,sha256=U4lIs_5Xp1qqM8mN72ieDkkIdiyALZFyCZsRC8ZmXlM,4944 +libpasteurize/fixes/fix_imports2.py,sha256=bs2V5Yv0v_8xLx-lNj9kNEAK2dLYXUXkZ2hxECg01CU,8580 +libpasteurize/fixes/fix_kwargs.py,sha256=NB_Ap8YJk-9ncoJRbOiPY_VMIigFgVB8m8AuY29DDhE,5991 +libpasteurize/fixes/fix_memoryview.py,sha256=Fwayx_ezpr22tbJ0-QrKdJ-FZTpU-m7y78l1h_N4xxc,551 +libpasteurize/fixes/fix_metaclass.py,sha256=IcE2KjaDG8jUR3FYXECzOC_cr2pr5r95W1NTbMrK8Wc,3260 +libpasteurize/fixes/fix_newstyle.py,sha256=78sazKOHm9DUoMyW4VdvQpMXZhicbXzorVPRhBpSUrM,888 +libpasteurize/fixes/fix_next.py,sha256=VHqcyORRNVqKJ51jJ1OkhwxHuXRgp8qaldyqcMvA4J0,1233 +libpasteurize/fixes/fix_printfunction.py,sha256=NDIfqVmUJBG3H9E6nrnN0cWZK8ch9pL4F-nMexdsa38,401 +libpasteurize/fixes/fix_raise.py,sha256=zQ_AcMsGmCbtKMgrxZGcHLYNscw6tqXFvHQxgqtNbU8,1099 +libpasteurize/fixes/fix_raise_.py,sha256=9STp633frUfYASjYzqhwxx_MXePNmMhfJClowRj8FLY,1225 +libpasteurize/fixes/fix_throw.py,sha256=_ZREVre-WttUvk4sWjrqUNqm9Q1uFaATECN0_-PXKbk,835 +libpasteurize/fixes/fix_unpacking.py,sha256=eMqRe44Nfq8lo0YFL9oKW75dGARmBSmklj4BCS_q1Lo,5946 +libpasteurize/main.py,sha256=dVHYTQQeJonuOFDNrenJZl-rKHgOQKRMPP1OqnJogWQ,8186 +past/__init__.py,sha256=wIiXaAvXl3svDi-fzuy6HDD0VsuCVr4cnqnCr8XINGI,2918 +past/__pycache__/__init__.cpython-38.pyc,, +past/builtins/__init__.py,sha256=7j_4OsUlN6q2eKr14do7mRQ1GwXRoXAMUR0A1fJpAls,1805 +past/builtins/__pycache__/__init__.cpython-38.pyc,, +past/builtins/__pycache__/misc.cpython-38.pyc,, +past/builtins/__pycache__/noniterators.cpython-38.pyc,, +past/builtins/misc.py,sha256=nw62HVSxuAgT-Q2lD3lmgRB9zmFXopS14dZHEv5xpDQ,2627 +past/builtins/noniterators.py,sha256=LtdELnd7KyYdXg7GkW25cgkEPUC0ggZ5AYMtDe9N95I,9370 +past/translation/__init__.py,sha256=j2e6mLeK74KEICqH6P_-tpKqSNZoMwip2toThhSmKpU,17646 
+past/translation/__pycache__/__init__.cpython-38.pyc,, +past/types/__init__.py,sha256=RyJlgqg9uJ8oF-kJT9QlfhfdmhiMh3fShmtvd2CQycY,879 +past/types/__pycache__/__init__.cpython-38.pyc,, +past/types/__pycache__/basestring.cpython-38.pyc,, +past/types/__pycache__/olddict.cpython-38.pyc,, +past/types/__pycache__/oldstr.cpython-38.pyc,, +past/types/basestring.py,sha256=qrImcr24wvdDCMvF9x0Tyx8S1lCt6GIwRvzuAmvg_Tg,728 +past/types/olddict.py,sha256=0YtffZ55VY6AyQ_rwu4DZ4vcRsp6dz-dQzczeyN8hLk,2721 +past/types/oldstr.py,sha256=J2sJPC5jWEdpqXPcFwJFNDKn51TKhi86PsLFmJtQr-M,4332 +past/utils/__init__.py,sha256=e8l1sOfdiDJ3dkckBWLNWvC1ahC5BX5haHC2TGdNgA8,2633 +past/utils/__pycache__/__init__.cpython-38.pyc,, diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..58f5843c6c613398a7a9cad8f562de3e7f61d314 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/future-0.18.2.dist-info/top_level.txt @@ -0,0 +1,4 @@ +future +libfuturize +libpasteurize +past diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97ef0d451b90c164a3e38b7b8a233765bf32e598 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +# Source of truth for Hydra's version +__version__ = "1.2.0" +from hydra import utils +from hydra.errors import MissingConfigException +from hydra.main import main +from hydra.types import TaskFunction + +from .compose import compose +from .initialize import initialize, initialize_config_dir, initialize_config_module + +__all__ = [ + "__version__", + "MissingConfigException", + "main", + "utils", + "TaskFunction", + "compose", + "initialize", + "initialize_config_module", + "initialize_config_dir", +] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/compose.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/compose.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc89228b7111cef12aee6a88246dd37d8dca3a5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/compose.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from textwrap import dedent +from typing import List, Optional + +from omegaconf import DictConfig, OmegaConf, open_dict + +from hydra import version +from hydra.core.global_hydra import GlobalHydra +from hydra.types import RunMode + +from ._internal.deprecation_warning import deprecation_warning + + +def compose( + config_name: Optional[str] = None, + overrides: List[str] = [], + return_hydra_config: bool = False, + strict: Optional[bool] = None, +) -> DictConfig: + """ + :param config_name: the name of the config + (usually the file name without the .yaml extension) + :param overrides: list of overrides for config file + :param return_hydra_config: True to return the hydra config node in the result + :param strict: DEPRECATED. If false, returned config has struct mode disabled. + :return: the composed config + """ + assert ( + GlobalHydra().is_initialized() + ), "GlobalHydra is not initialized, use @hydra.main() or call one of the hydra initialization methods first" + + gh = GlobalHydra.instance() + assert gh.hydra is not None + cfg = gh.hydra.compose_config( + config_name=config_name, + overrides=overrides, + run_mode=RunMode.RUN, + from_shell=False, + with_log_configuration=False, + ) + assert isinstance(cfg, DictConfig) + + if not return_hydra_config: + if "hydra" in cfg: + with open_dict(cfg): + del cfg["hydra"] + + if strict is not None: + if version.base_at_least("1.2"): + raise TypeError("got an unexpected 'strict' argument") + else: + deprecation_warning( + dedent( + """ + The strict flag in the compose API is deprecated. + See https://hydra.cc/docs/upgrades/0.11_to_1.0/strict_mode_flag_deprecated for more info. + """ + ) + ) + OmegaConf.set_struct(cfg, strict) + + return cfg diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/errors.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..16849a2f550f696fa5d96b1777dd8b61fbb516d9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/errors.py @@ -0,0 +1,45 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import Optional, Sequence + + +class HydraException(Exception): + ... + + +class CompactHydraException(HydraException): + ... 
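The `hydra/compose.py` hunk above brings in Hydra's Compose API. A minimal usage sketch; the `conf` directory, the `config.yaml` name, and the `db.user` override are illustrative assumptions, not files added by this diff:

```python
from hydra import compose, initialize

# initialize() sets up the GlobalHydra singleton that the assertion at the top
# of compose() checks for; as a context manager it restores prior state on exit.
with initialize(version_base=None, config_path="conf"):
    cfg = compose(config_name="config", overrides=["db.user=appuser"])
    print(cfg.db.user)  # -> appuser
```

Passing `version_base=None` opts into the current version's defaults and avoids the deprecation warning that `hydra/version.py` (later in this diff) emits when the parameter is left unspecified.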
+
+
+class OverrideParseException(CompactHydraException):
+    def __init__(self, override: str, message: str) -> None:
+        super(OverrideParseException, self).__init__(message)
+        self.override = override
+        self.message = message
+
+
+class InstantiationException(CompactHydraException):
+    ...
+
+
+class ConfigCompositionException(CompactHydraException):
+    ...
+
+
+class SearchPathException(CompactHydraException):
+    ...
+
+
+class MissingConfigException(IOError, ConfigCompositionException):
+    def __init__(
+        self,
+        message: str,
+        missing_cfg_file: Optional[str],
+        options: Optional[Sequence[str]] = None,
+    ) -> None:
+        super(MissingConfigException, self).__init__(message)
+        self.missing_cfg_file = missing_cfg_file
+        self.options = options
+
+
+class HydraDeprecationError(HydraException):
+    ...
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/initialize.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/initialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed94e736f2e5e33915d67cacccbdf45953923718
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/initialize.py
@@ -0,0 +1,179 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import copy
+import os
+from textwrap import dedent
+from typing import Any, Optional
+
+from hydra import version
+from hydra._internal.deprecation_warning import deprecation_warning
+from hydra._internal.hydra import Hydra
+from hydra._internal.utils import (
+    create_config_search_path,
+    detect_calling_file_or_module_from_stack_frame,
+    detect_task_name,
+)
+from hydra.core.global_hydra import GlobalHydra
+from hydra.core.singleton import Singleton
+from hydra.errors import HydraException
+
+
+def get_gh_backup() -> Any:
+    if GlobalHydra in Singleton._instances:
+        return copy.deepcopy(Singleton._instances[GlobalHydra])
+    else:
+        return None
+
+
+def restore_gh_from_backup(_gh_backup: Any) -> Any:
+    if _gh_backup is None:
+        del Singleton._instances[GlobalHydra]
+    else:
+        Singleton._instances[GlobalHydra] = _gh_backup
+
+
+_UNSPECIFIED_: Any = object()
+
+
+class initialize:
+    """
+    Initializes Hydra and adds the config_path to the config search path.
+    config_path is relative to the parent of the caller.
+    Hydra detects the caller type automatically at runtime.
+
+    Supported callers:
+    - Python scripts
+    - Python modules
+    - Unit tests
+    - Jupyter notebooks.
+    :param config_path: path relative to the parent of the caller
+    :param job_name: the value for hydra.job.name (By default it is automatically detected based on the caller)
+    :param caller_stack_depth: stack depth of the caller, defaults to 1 (direct caller).
+    """
+
+    def __init__(
+        self,
+        config_path: Optional[str] = _UNSPECIFIED_,
+        version_base: Optional[str] = _UNSPECIFIED_,
+        job_name: Optional[str] = None,
+        caller_stack_depth: int = 1,
+    ) -> None:
+        self._gh_backup = get_gh_backup()
+
+        version.setbase(version_base)
+
+        if config_path is _UNSPECIFIED_:
+            if version.base_at_least("1.2"):
+                config_path = None
+            elif version_base is _UNSPECIFIED_:
+                url = "https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_hydra_main_config_path"
+                deprecation_warning(
+                    message=dedent(
+                        f"""\
+                    config_path is not specified in hydra.initialize().
+                    See {url} for more information."""
+                    ),
+                    stacklevel=2,
+                )
+                config_path = "."
+            else:
+                config_path = "."
+
+        if config_path is not None and os.path.isabs(config_path):
+            raise HydraException("config_path in initialize() must be relative")
+        calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(
+            caller_stack_depth + 1
+        )
+        if job_name is None:
+            job_name = detect_task_name(
+                calling_file=calling_file, calling_module=calling_module
+            )
+
+        Hydra.create_main_hydra_file_or_module(
+            calling_file=calling_file,
+            calling_module=calling_module,
+            config_path=config_path,
+            job_name=job_name,
+        )
+
+    def __enter__(self, *args: Any, **kwargs: Any) -> None:
+        ...
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        restore_gh_from_backup(self._gh_backup)
+
+    def __repr__(self) -> str:
+        return "hydra.initialize()"
+
+
+class initialize_config_module:
+    """
+    Initializes Hydra and adds the config_module to the config search path.
+    The config module must be importable (an __init__.py must exist at its top level)
+    :param config_module: absolute module name, for example "foo.bar.conf".
+    :param job_name: the value for hydra.job.name (default is 'app')
+    """
+
+    def __init__(
+        self,
+        config_module: str,
+        version_base: Optional[str] = _UNSPECIFIED_,
+        job_name: str = "app",
+    ):
+        self._gh_backup = get_gh_backup()
+
+        version.setbase(version_base)
+
+        Hydra.create_main_hydra_file_or_module(
+            calling_file=None,
+            calling_module=f"{config_module}.{job_name}",
+            config_path=None,
+            job_name=job_name,
+        )
+
+    def __enter__(self, *args: Any, **kwargs: Any) -> None:
+        ...
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        restore_gh_from_backup(self._gh_backup)
+
+    def __repr__(self) -> str:
+        return "hydra.initialize_config_module()"
+
+
+class initialize_config_dir:
+    """
+    Initializes Hydra and adds an absolute config dir to the config search path.
+    The config_dir is always a path on the file system and must be an absolute path.
+    Relative paths will result in an error.
+    :param config_dir: absolute file system path
+    :param job_name: the value for hydra.job.name (default is 'app')
+    """
+
+    def __init__(
+        self,
+        config_dir: str,
+        version_base: Optional[str] = _UNSPECIFIED_,
+        job_name: str = "app",
+    ) -> None:
+        self._gh_backup = get_gh_backup()
+
+        version.setbase(version_base)
+
+        # Relative here would be interpreted as relative to cwd, which, depending on when it is run,
+        # may have unexpected meaning. Best to force an absolute path to avoid confusion.
+        # Can consider using hydra.utils.to_absolute_path() to convert it at a future point if there is demand.
+        if not os.path.isabs(config_dir):
+            raise HydraException(
+                "initialize_config_dir() requires an absolute config_dir as input"
+            )
+        csp = create_config_search_path(search_path_dir=config_dir)
+        Hydra.create_main_hydra2(task_name=job_name, config_search_path=csp)
+
+    def __enter__(self, *args: Any, **kwargs: Any) -> None:
+        ...
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        restore_gh_from_backup(self._gh_backup)
+
+    def __repr__(self) -> str:
+        return "hydra.initialize_config_dir()"
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/main.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f58271396fe61f35735604a5b810c4ee4ecd13f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/main.py
@@ -0,0 +1,100 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import copy
+import functools
+import pickle
+import warnings
+from pathlib import Path
+from textwrap import dedent
+from typing import Any, Callable, List, Optional
+
+from omegaconf import DictConfig, open_dict, read_write
+
+from . import version
+from ._internal.deprecation_warning import deprecation_warning
+from ._internal.utils import _run_hydra, get_args_parser
+from .core.hydra_config import HydraConfig
+from .core.utils import _flush_loggers, configure_log
+from .types import TaskFunction
+
+_UNSPECIFIED_: Any = object()
+
+
+def _get_rerun_conf(file_path: str, overrides: List[str]) -> DictConfig:
+    msg = "Experimental rerun CLI option, other command line args are ignored."
+    warnings.warn(msg, UserWarning)
+    file = Path(file_path)
+    if not file.exists():
+        raise ValueError(f"File {file} does not exist!")
+
+    if len(overrides) > 0:
+        msg = "Config overrides are not supported as of now."
+        warnings.warn(msg, UserWarning)
+
+    with open(str(file), "rb") as input:
+        config = pickle.load(input)  # nosec
+    configure_log(config.hydra.job_logging, config.hydra.verbose)
+    HydraConfig.instance().set_config(config)
+    task_cfg = copy.deepcopy(config)
+    with read_write(task_cfg):
+        with open_dict(task_cfg):
+            del task_cfg["hydra"]
+    assert isinstance(task_cfg, DictConfig)
+    return task_cfg
+
+
+def main(
+    config_path: Optional[str] = _UNSPECIFIED_,
+    config_name: Optional[str] = None,
+    version_base: Optional[str] = _UNSPECIFIED_,
+) -> Callable[[TaskFunction], Any]:
+    """
+    :param config_path: The config path, a directory relative to the declaring python file.
+                        If config_path is None no directory is added to the Config search path.
+    :param config_name: The name of the config (usually the file name without the .yaml extension)
+    """
+
+    version.setbase(version_base)
+
+    if config_path is _UNSPECIFIED_:
+        if version.base_at_least("1.2"):
+            config_path = None
+        elif version_base is _UNSPECIFIED_:
+            url = "https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_hydra_main_config_path"
+            deprecation_warning(
+                message=dedent(
+                    f"""
+                config_path is not specified in @hydra.main().
+                See {url} for more information."""
+                ),
+                stacklevel=2,
+            )
+            config_path = "."
+        else:
+            config_path = "."
+
+    def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
+        @functools.wraps(task_function)
+        def decorated_main(cfg_passthrough: Optional[DictConfig] = None) -> Any:
+            if cfg_passthrough is not None:
+                return task_function(cfg_passthrough)
+            else:
+                args_parser = get_args_parser()
+                args = args_parser.parse_args()
+                if args.experimental_rerun is not None:
+                    cfg = _get_rerun_conf(args.experimental_rerun, args.overrides)
+                    task_function(cfg)
+                    _flush_loggers()
+                else:
+                    # no return value from run_hydra() as it may sometimes actually run the task_function
+                    # multiple times (--multirun)
+                    _run_hydra(
+                        args=args,
+                        args_parser=args_parser,
+                        task_function=task_function,
+                        config_path=config_path,
+                        config_name=config_name,
+                    )
+
+        return decorated_main
+
+    return main_decorator
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/types.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..767f557b1e0da7725bbebefb1d12d3662b84659e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/types.py
@@ -0,0 +1,82 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from dataclasses import dataclass
+from enum import Enum
+from typing import TYPE_CHECKING, Any, Callable
+
+from omegaconf import MISSING
+
+from hydra import version
+
+from ._internal.deprecation_warning import deprecation_warning
+
+TaskFunction = Callable[[Any], Any]
+
+
+if TYPE_CHECKING:
+    from hydra._internal.callbacks import Callbacks
+    from hydra.core.config_loader import ConfigLoader
+
+
+@dataclass
+class HydraContext:
+    config_loader: "ConfigLoader"
+    callbacks: "Callbacks"
+
+
+@dataclass
+class TargetConf:
+    """
+    This class is going away in Hydra 1.2.
+    You should no longer extend it or annotate with it.
+    instantiate will work correctly if you pass in a DictConfig object or any dataclass that has the
+    _target_ attribute.
+    """
+
+    _target_: str = MISSING
+
+    def __post_init__(self) -> None:
+        if version.base_at_least("1.2"):
+            raise TypeError("TargetConf is unsupported since Hydra 1.2")
+        else:
+            msg = "\nTargetConf is deprecated since Hydra 1.1 and will be removed in Hydra 1.2."
+            deprecation_warning(message=msg)
+
+
+class RunMode(Enum):
+    RUN = 1
+    MULTIRUN = 2
+
+
+class ConvertMode(Enum):
+    """ConvertMode for instantiate, controls return type.
+
+    A config is either config or instance-like (`_target_` field).
+
+    If instance-like, instantiate resolves the callable (class or
+    function) and returns the result of the call on the rest of the
+    parameters.
+
+    If "none", config-like configs will be kept as is.
+
+    If "partial", config-like configs will be converted to native python
+    containers (list and dict), unless they are structured configs (
+    dataclasses or attr instances).
+
+    If "all", config-like configs will all be converted to native python
+    containers (list and dict).
+ """ + + # Use DictConfig/ListConfig + NONE = "none" + # Convert the OmegaConf config to primitive container, Structured Configs are preserved + PARTIAL = "partial" + # Fully convert the OmegaConf config to primitive containers (dict, list and primitives). + ALL = "all" + + def __eq__(self, other: Any) -> Any: + if isinstance(other, ConvertMode): + return other.value == self.value + elif isinstance(other, str): + return other.upper() == self.name.upper() + else: + return NotImplemented diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33c1ac1dc8de5962be9398429b8f40f3868b5e96 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/utils.py @@ -0,0 +1,84 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging.config +import os +from pathlib import Path +from typing import Any, Callable + +import hydra._internal.instantiate._instantiate2 +import hydra.types +from hydra._internal.utils import _locate +from hydra.core.hydra_config import HydraConfig + +log = logging.getLogger(__name__) + +# Instantiation related symbols +instantiate = hydra._internal.instantiate._instantiate2.instantiate +call = instantiate +ConvertMode = hydra.types.ConvertMode + + +def get_class(path: str) -> type: + try: + cls = _locate(path) + if not isinstance(cls, type): + raise ValueError( + f"Located non-class of type '{type(cls).__name__}'" + + f" while loading '{path}'" + ) + return cls + except Exception as e: + log.error(f"Error initializing class at {path}: {e}") + raise e + + +def get_method(path: str) -> Callable[..., Any]: + try: + obj = _locate(path) + if not callable(obj): + raise ValueError( + f"Located non-callable of type '{type(obj).__name__}'" + + f" while loading '{path}'" + ) + cl: Callable[..., Any] = obj + return cl + except Exception as e: + log.error(f"Error getting callable at {path} : {e}") + raise e + + +# Alias for get_method +get_static_method = get_method + + +def get_original_cwd() -> str: + """ + :return: the original working directory the Hydra application was launched from + """ + if not HydraConfig.initialized(): + raise ValueError( + "get_original_cwd() must only be used after HydraConfig is initialized" + ) + ret = HydraConfig.get().runtime.cwd + assert ret is not None and isinstance(ret, str) + return ret + + +def to_absolute_path(path: str) -> str: + """ + converts the specified path to be absolute path. + if the input path is relative, it's interpreted as relative to the original working directory + if it's absolute, it's returned as is + :param path: path to convert + :return: + """ + p = Path(path) + if not HydraConfig.initialized(): + base = Path(os.getcwd()) + else: + base = Path(get_original_cwd()) + if p.is_absolute(): + ret = p + else: + ret = base / p + return str(ret) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/version.py new file mode 100644 index 0000000000000000000000000000000000000000..ec6c329b0ea001de5d3acbd45cee74abad85bfa9 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/hydra/version.py @@ -0,0 +1,80 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +# Source of truth for Hydra's version + +from textwrap import dedent +from typing import Any, Optional + +from packaging.version import Version + +from . import __version__ +from ._internal.deprecation_warning import deprecation_warning +from .core.singleton import Singleton +from .errors import HydraException + +_UNSPECIFIED_: Any = object() + +__compat_version__: Version = Version("1.1") + + +class VersionBase(metaclass=Singleton): + def __init__(self) -> None: + self.version_base: Optional[Version] = _UNSPECIFIED_ + + def setbase(self, version: "Version") -> None: + assert isinstance( + version, Version + ), f"Unexpected Version type : {type(version)}" + self.version_base = version + + def getbase(self) -> Optional[Version]: + return self.version_base + + @staticmethod + def instance(*args: Any, **kwargs: Any) -> "VersionBase": + return Singleton.instance(VersionBase, *args, **kwargs) # type: ignore + + @staticmethod + def set_instance(instance: "VersionBase") -> None: + assert isinstance(instance, VersionBase) + Singleton._instances[VersionBase] = instance # type: ignore + + +def _get_version(ver: str) -> Version: + # Only consider major.minor as packaging will compare "1.2.0.dev2" < "1.2" + pver = Version(ver) + return Version(f"{pver.major}.{pver.minor}") + + +def base_at_least(ver: str) -> bool: + _version_base = VersionBase.instance().getbase() + if type(_version_base) is type(_UNSPECIFIED_): + VersionBase.instance().setbase(__compat_version__) + _version_base = __compat_version__ + assert isinstance(_version_base, Version) + return _version_base >= _get_version(ver) + + +def getbase() -> Optional[Version]: + return VersionBase.instance().getbase() + + +def setbase(ver: Any) -> None: + if type(ver) is type(_UNSPECIFIED_): + deprecation_warning( + message=dedent( + f""" + The version_base parameter is not specified. + Please specify a compatability version level, or None. + Will assume defaults for version {__compat_version__}""" + ), + stacklevel=3, + ) + _version_base = __compat_version__ + elif ver is None: + _version_base = _get_version(__version__) + else: + _version_base = _get_version(ver) + if _version_base < __compat_version__: + raise HydraException(f'version_base must be >= "{__compat_version__}"') + VersionBase.instance().setbase(_version_base) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/LICENSE.BSD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/LICENSE.BSD new file mode 100644 index 0000000000000000000000000000000000000000..42ce7b75c92fb01a3f6ed17eea363f756b7da582 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..10ab4390a96923dea26efffa4c42eaacb502536b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/METADATA @@ -0,0 +1,102 @@ +Metadata-Version: 2.1 +Name: packaging +Version: 24.0 +Summary: Core utilities for Python packages +Author-email: Donald Stufft <donald@stufft.io> +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Project-URL: Documentation, https://packaging.pypa.io/ +Project-URL: Source, https://github.com/pypa/packaging + +packaging +========= + +.. start-intro + +Reusable core utilities for various Python Packaging +`interoperability specifications <https://packaging.python.org/specifications/>`_. + +This library provides utilities that implement the interoperability +specifications which have clearly one correct behaviour (eg: :pep:`440`) +or benefit greatly from having a single shared implementation (eg: :pep:`425`). + +.. end-intro + +The ``packaging`` project includes the following: version handling, specifiers, +markers, requirements, tags, utilities. + +Documentation +------------- + +The `documentation`_ provides information and the API for the following: + +- Version Handling +- Specifiers +- Markers +- Requirements +- Tags +- Utilities + +Installation +------------ + +Use ``pip`` to install these utilities:: + + pip install packaging + +The ``packaging`` library uses calendar-based versioning (``YY.N``). + +Discussion +---------- + +If you run into bugs, you can file them in our `issue tracker`_. + +You can also join ``#pypa`` on Freenode to ask questions or get involved. + + +.. _`documentation`: https://packaging.pypa.io/ +.. 
_`issue tracker`: https://github.com/pypa/packaging/issues + + +Code of Conduct +--------------- + +Everyone interacting in the packaging project's codebases, issue trackers, chat +rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_. + +.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md + +Contributing +------------ + +The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as +well as how to report a potential security issue. The documentation for this +project also covers information about `project development`_ and `security`_. + +.. _`project development`: https://packaging.pypa.io/en/latest/development/ +.. _`security`: https://packaging.pypa.io/en/latest/security/ + +Project History +--------------- + +Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for +recent changes and project history. + +.. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/ + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..adbbd1bb0c4ce7b3f65363b333ba52538a7346d1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/RECORD @@ -0,0 +1,36 @@ +packaging-24.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +packaging-24.0.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +packaging-24.0.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +packaging-24.0.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +packaging-24.0.dist-info/METADATA,sha256=0dESdhY_wHValuOrbgdebiEw04EbX4dkujlxPdEsFus,3203 +packaging-24.0.dist-info/RECORD,, +packaging-24.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +packaging/__init__.py,sha256=UzotcV07p8vcJzd80S-W0srhgY8NMVD_XvJcZ7JN-tA,496 +packaging/__pycache__/__init__.cpython-38.pyc,, +packaging/__pycache__/_elffile.cpython-38.pyc,, +packaging/__pycache__/_manylinux.cpython-38.pyc,, +packaging/__pycache__/_musllinux.cpython-38.pyc,, +packaging/__pycache__/_parser.cpython-38.pyc,, +packaging/__pycache__/_structures.cpython-38.pyc,, +packaging/__pycache__/_tokenizer.cpython-38.pyc,, +packaging/__pycache__/markers.cpython-38.pyc,, +packaging/__pycache__/metadata.cpython-38.pyc,, +packaging/__pycache__/requirements.cpython-38.pyc,, +packaging/__pycache__/specifiers.cpython-38.pyc,, +packaging/__pycache__/tags.cpython-38.pyc,, +packaging/__pycache__/utils.cpython-38.pyc,, +packaging/__pycache__/version.cpython-38.pyc,, +packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266 +packaging/_manylinux.py,sha256=1ng_TqyH49hY6s3W_zVHyoJIaogbJqbIF1jJ0fAehc4,9590 +packaging/_musllinux.py,sha256=kgmBGLFybpy8609-KTvzmt2zChCPWYvhp5BWP4JX7dE,2676 +packaging/_parser.py,sha256=zlsFB1FpMRjkUdQb6WLq7xON52ruQadxFpYsDXWhLb4,10347 +packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +packaging/_tokenizer.py,sha256=alCtbwXhOFAmFGZ6BQ-wCTSFoRAJ2z-ysIf7__MTJ_k,5292 +packaging/markers.py,sha256=eH-txS2zq1HdNpTd9LcZUcVIwewAiNU0grmq5wjKnOk,8208 +packaging/metadata.py,sha256=w7jPEg6mDf1FTZMn79aFxFuk4SKtynUJtxr2InTxlV4,33036 +packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+packaging/requirements.py,sha256=dgoBeVprPu2YE6Q8nGfwOPTjATHbRa_ZGLyXhFEln6Q,2933 +packaging/specifiers.py,sha256=dB2DwbmvSbEuVilEyiIQ382YfW5JfwzXTfRRPVtaENY,39784 +packaging/tags.py,sha256=fedHXiOHkBxNZTXotXv8uXPmMFU9ae-TKBujgYHigcA,18950 +packaging/utils.py,sha256=XgdmP3yx9-wQEFjO7OvMj9RjEf5JlR5HFFR69v7SQ9E,5268 +packaging/version.py,sha256=XjRBLNK17UMDgLeP8UHnqwiY3TdSi03xFQURtec211A,16236 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging-24.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/_structures.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/_structures.py new file mode 100644 index 0000000000000000000000000000000000000000..90a6465f9682c886363eea5327dac64bf623a6ff --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/metadata.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..fb274930799da0f8ee17566b5b587b4047282c7b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/metadata.py @@ -0,0 +1,825 @@ +import email.feedparser +import email.header +import email.message +import email.parser +import email.policy +import sys +import typing +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +from . 
import requirements, specifiers, utils, version as version_module + +T = typing.TypeVar("T") +if sys.version_info[:2] >= (3, 8): # pragma: no cover + from typing import Literal, TypedDict +else: # pragma: no cover + if typing.TYPE_CHECKING: + from typing_extensions import Literal, TypedDict + else: + try: + from typing_extensions import Literal, TypedDict + except ImportError: + + class Literal: + def __init_subclass__(*_args, **_kwargs): + pass + + class TypedDict: + def __init_subclass__(*_args, **_kwargs): + pass + + +try: + ExceptionGroup +except NameError: # pragma: no cover + + class ExceptionGroup(Exception): # noqa: N818 + """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11. + + If :external:exc:`ExceptionGroup` is already defined by Python itself, + that version is used instead. + """ + + message: str + exceptions: List[Exception] + + def __init__(self, message: str, exceptions: List[Exception]) -> None: + self.message = message + self.exceptions = exceptions + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})" + +else: # pragma: no cover + ExceptionGroup = ExceptionGroup + + +class InvalidMetadata(ValueError): + """A metadata field contains invalid data.""" + + field: str + """The name of the field that contains invalid data.""" + + def __init__(self, field: str, message: str) -> None: + self.field = field + super().__init__(message) + + +# The RawMetadata class attempts to make as few assumptions about the underlying +# serialization formats as possible. The idea is that as long as a serialization +# formats offer some very basic primitives in *some* way then we can support +# serializing to and from that format. +class RawMetadata(TypedDict, total=False): + """A dictionary of raw core metadata. + + Each field in core metadata maps to a key of this dictionary (when data is + provided). The key is lower-case and underscores are used instead of dashes + compared to the equivalent core metadata field. Any core metadata field that + can be specified multiple times or can hold multiple values in a single + field have a key with a plural name. See :class:`Metadata` whose attributes + match the keys of this dictionary. + + Core metadata fields that can be specified multiple times are stored as a + list or dict depending on which is appropriate for the field. Any fields + which hold multiple values in a single field are stored as a list. + + """ + + # Metadata 1.0 - PEP 241 + metadata_version: str + name: str + version: str + platforms: List[str] + summary: str + description: str + keywords: List[str] + home_page: str + author: str + author_email: str + license: str + + # Metadata 1.1 - PEP 314 + supported_platforms: List[str] + download_url: str + classifiers: List[str] + requires: List[str] + provides: List[str] + obsoletes: List[str] + + # Metadata 1.2 - PEP 345 + maintainer: str + maintainer_email: str + requires_dist: List[str] + provides_dist: List[str] + obsoletes_dist: List[str] + requires_python: str + requires_external: List[str] + project_urls: Dict[str, str] + + # Metadata 2.0 + # PEP 426 attempted to completely revamp the metadata format + # but got stuck without ever being able to build consensus on + # it and ultimately ended up withdrawn. + # + # However, a number of tools had started emitting METADATA with + # `2.0` Metadata-Version, so for historical reasons, this version + # was skipped. 
+ + # Metadata 2.1 - PEP 566 + description_content_type: str + provides_extra: List[str] + + # Metadata 2.2 - PEP 643 + dynamic: List[str] + + # Metadata 2.3 - PEP 685 + # No new fields were added in PEP 685, just some edge cases were + # tightened up to provide better interoperability. + + +_STRING_FIELDS = { + "author", + "author_email", + "description", + "description_content_type", + "download_url", + "home_page", + "license", + "maintainer", + "maintainer_email", + "metadata_version", + "name", + "requires_python", + "summary", + "version", +} + +_LIST_FIELDS = { + "classifiers", + "dynamic", + "obsoletes", + "obsoletes_dist", + "platforms", + "provides", + "provides_dist", + "provides_extra", + "requires", + "requires_dist", + "requires_external", + "supported_platforms", +} + +_DICT_FIELDS = { + "project_urls", +} + + +def _parse_keywords(data: str) -> List[str]: + """Split a string of comma-separated keywords into a list of keywords.""" + return [k.strip() for k in data.split(",")] + + +def _parse_project_urls(data: List[str]) -> Dict[str, str]: + """Parse a list of label/URL string pairings separated by a comma.""" + urls = {} + for pair in data: + # Our logic is slightly tricky here as we want to try and do + # *something* reasonable with malformed data. + # + # The main thing that we have to worry about is data that does + # not have a ',' at all to split the label from the value. There + # isn't a singular right answer here, and we will fail validation + # later on (if the caller is validating) so it doesn't *really* + # matter, but since the missing value has to be an empty str + # and our return value is dict[str, str], if we let the key + # be the missing value, then they'd have multiple '' values that + # overwrite each other in an accumulating dict. + # + # The other potential issue is that it's possible to have the + # same label multiple times in the metadata, with no solid "right" + # answer for what to do in that case. As such, we'll do the only + # thing we can, which is treat the field as unparseable and add it + # to our list of unparsed fields. + parts = [p.strip() for p in pair.split(",", 1)] + parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items + + # TODO: The spec doesn't say anything about whether the keys should be + # considered case sensitive or not... logically they should + # be case-preserving and case-insensitive, but doing that + # would open up more cases where we might have duplicate + # entries. + label, url = parts + if label in urls: + # The label already exists in our set of urls, so this field + # is unparseable, and we can just add the whole thing to our + # unparseable data and stop processing it. + raise KeyError("duplicate labels in project urls") + urls[label] = url + + return urls + + +def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str: + """Get the body of the message.""" + # If our source is a str, then our caller has managed encodings for us, + # and we don't need to deal with it. + if isinstance(source, str): + payload: str = msg.get_payload() + return payload + # If our source is a bytes, then we're managing the encoding and we need + # to deal with it. + else: + bpayload: bytes = msg.get_payload(decode=True) + try: + return bpayload.decode("utf8", "strict") + except UnicodeDecodeError: + raise ValueError("payload in an invalid encoding") + + +# The various parse_FORMAT functions here are intended to be as lenient as +# possible in their parsing, while still returning a correctly typed +# RawMetadata.
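Before moving on to the email parsing itself, a quick sketch of what the two helpers just defined do with well-formed input; the labels, URLs, and keywords below are made-up values:

# Sketch: the private helpers above, called directly with invented data.
from packaging.metadata import _parse_keywords, _parse_project_urls

assert _parse_keywords("packaging, versions ,pep440") == ["packaging", "versions", "pep440"]

# Each Project-URL entry is a "label, url" pair; a duplicated label raises
# KeyError, which parse_email() later records as an unparsed field.
urls = _parse_project_urls([
    "Homepage, https://example.org",
    "Tracker, https://example.org/issues",
])
assert urls == {"Homepage": "https://example.org", "Tracker": "https://example.org/issues"}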
+# +# To aid in this, we also generally want to do as little touching of the +# data as possible, except where there are possibly some historic holdovers +# that make valid data awkward to work with. +# +# While this is a lower level, intermediate format than our ``Metadata`` +# class, some light touch ups can make a massive difference in usability. + +# Map METADATA fields to RawMetadata. +_EMAIL_TO_RAW_MAPPING = { + "author": "author", + "author-email": "author_email", + "classifier": "classifiers", + "description": "description", + "description-content-type": "description_content_type", + "download-url": "download_url", + "dynamic": "dynamic", + "home-page": "home_page", + "keywords": "keywords", + "license": "license", + "maintainer": "maintainer", + "maintainer-email": "maintainer_email", + "metadata-version": "metadata_version", + "name": "name", + "obsoletes": "obsoletes", + "obsoletes-dist": "obsoletes_dist", + "platform": "platforms", + "project-url": "project_urls", + "provides": "provides", + "provides-dist": "provides_dist", + "provides-extra": "provides_extra", + "requires": "requires", + "requires-dist": "requires_dist", + "requires-external": "requires_external", + "requires-python": "requires_python", + "summary": "summary", + "supported-platform": "supported_platforms", + "version": "version", +} +_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()} + + +def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]: + """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``). + + This function returns a two-item tuple of dicts. The first dict is of + recognized fields from the core metadata specification. Fields that can be + parsed and translated into Python's built-in types are converted + appropriately. All other fields are left as-is. Fields that are allowed to + appear multiple times are stored as lists. + + The second dict contains all other fields from the metadata. This includes + any unrecognized fields. It also includes any fields which are expected to + be parsed into a built-in type but were not formatted appropriately. Finally, + any fields that are expected to appear only once but are repeated are + included in this dict. + + """ + raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {} + unparsed: Dict[str, List[str]] = {} + + if isinstance(data, str): + parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data) + else: + parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data) + + # We have to wrap parsed.keys() in a set, because in the case of multiple + # values for a key (a list), the key will appear multiple times in the + # list of keys, but we're avoiding that by using get_all(). + for name in frozenset(parsed.keys()): + # Header names in RFC are case insensitive, so we'll normalize to all + # lower case to make comparisons easier. + name = name.lower() + + # We use get_all() here, even for fields that aren't multiple use, + # because otherwise someone could have e.g. two Name fields, and we + # would just silently ignore it rather than doing something about it. + headers = parsed.get_all(name) or [] + + # The way the email module works when parsing bytes is that it + # unconditionally decodes the bytes as ascii using the surrogateescape + # handler. 
When you pull that data back out (such as with get_all() ), + # it looks to see if the str has any surrogate escapes, and if it does + # it wraps it in a Header object instead of returning the string. + # + # As such, we'll look for those Header objects, and fix up the encoding. + value = [] + # Flag if we have run into any issues processing the headers, thus + # signalling that the data belongs in 'unparsed'. + valid_encoding = True + for h in headers: + # It's unclear if this can return more types than just a Header or + # a str, so we'll just assert here to make sure. + assert isinstance(h, (email.header.Header, str)) + + # If it's a header object, we need to do our little dance to get + # the real data out of it. In cases where there is invalid data + # we're going to end up with mojibake, but there's no obvious, good + # way around that without reimplementing parts of the Header object + # ourselves. + # + # That should be fine since, if mojibacked happens, this key is + # going into the unparsed dict anyways. + if isinstance(h, email.header.Header): + # The Header object stores it's data as chunks, and each chunk + # can be independently encoded, so we'll need to check each + # of them. + chunks: List[Tuple[bytes, Optional[str]]] = [] + for bin, encoding in email.header.decode_header(h): + try: + bin.decode("utf8", "strict") + except UnicodeDecodeError: + # Enable mojibake. + encoding = "latin1" + valid_encoding = False + else: + encoding = "utf8" + chunks.append((bin, encoding)) + + # Turn our chunks back into a Header object, then let that + # Header object do the right thing to turn them into a + # string for us. + value.append(str(email.header.make_header(chunks))) + # This is already a string, so just add it. + else: + value.append(h) + + # We've processed all of our values to get them into a list of str, + # but we may have mojibake data, in which case this is an unparsed + # field. + if not valid_encoding: + unparsed[name] = value + continue + + raw_name = _EMAIL_TO_RAW_MAPPING.get(name) + if raw_name is None: + # This is a bit of a weird situation, we've encountered a key that + # we don't know what it means, so we don't know whether it's meant + # to be a list or not. + # + # Since we can't really tell one way or another, we'll just leave it + # as a list, even though it may be a single item list, because that's + # what makes the most sense for email headers. + unparsed[name] = value + continue + + # If this is one of our string fields, then we'll check to see if our + # value is a list of a single item. If it is then we'll assume that + # it was emitted as a single string, and unwrap the str from inside + # the list. + # + # If it's any other kind of data, then we haven't the faintest clue + # what we should parse it as, and we have to just add it to our list + # of unparsed stuff. + if raw_name in _STRING_FIELDS and len(value) == 1: + raw[raw_name] = value[0] + # If this is one of our list of string fields, then we can just assign + # the value, since email *only* has strings, and our get_all() call + # above ensures that this is a list. + elif raw_name in _LIST_FIELDS: + raw[raw_name] = value + # Special Case: Keywords + # The keywords field is implemented in the metadata spec as a str, + # but it conceptually is a list of strings, and is serialized using + # ", ".join(keywords), so we'll do some light data massaging to turn + # this into what it logically is. 
+ elif raw_name == "keywords" and len(value) == 1: + raw[raw_name] = _parse_keywords(value[0]) + # Special Case: Project-URL + # The project urls is implemented in the metadata spec as a list of + # specially-formatted strings that represent a key and a value, which + # is fundamentally a mapping, however the email format doesn't support + # mappings in a sane way, so it was crammed into a list of strings + # instead. + # + # We will do a little light data massaging to turn this into a map as + # it logically should be. + elif raw_name == "project_urls": + try: + raw[raw_name] = _parse_project_urls(value) + except KeyError: + unparsed[name] = value + # Nothing that we've done has managed to parse this, so it'll just + # throw it in our unparseable data and move on. + else: + unparsed[name] = value + + # We need to support getting the Description from the message payload in + # addition to getting it from the the headers. This does mean, though, there + # is the possibility of it being set both ways, in which case we put both + # in 'unparsed' since we don't know which is right. + try: + payload = _get_payload(parsed, data) + except ValueError: + unparsed.setdefault("description", []).append( + parsed.get_payload(decode=isinstance(data, bytes)) + ) + else: + if payload: + # Check to see if we've already got a description, if so then both + # it, and this body move to unparseable. + if "description" in raw: + description_header = cast(str, raw.pop("description")) + unparsed.setdefault("description", []).extend( + [description_header, payload] + ) + elif "description" in unparsed: + unparsed["description"].append(payload) + else: + raw["description"] = payload + + # We need to cast our `raw` to a metadata, because a TypedDict only support + # literal key names, but we're computing our key names on purpose, but the + # way this function is implemented, our `TypedDict` can only have valid key + # names. + return cast(RawMetadata, raw), unparsed + + +_NOT_FOUND = object() + + +# Keep the two values in sync. +_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] +_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"] + +_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"]) + + +class _Validator(Generic[T]): + """Validate a metadata field. + + All _process_*() methods correspond to a core metadata field. The method is + called with the field's raw value. If the raw value is valid it is returned + in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field). + If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause + as appropriate). + """ + + name: str + raw_name: str + added: _MetadataVersion + + def __init__( + self, + *, + added: _MetadataVersion = "1.0", + ) -> None: + self.added = added + + def __set_name__(self, _owner: "Metadata", name: str) -> None: + self.name = name + self.raw_name = _RAW_TO_EMAIL_MAPPING[name] + + def __get__(self, instance: "Metadata", _owner: Type["Metadata"]) -> T: + # With Python 3.8, the caching can be replaced with functools.cached_property(). + # No need to check the cache as attribute lookup will resolve into the + # instance's __dict__ before __get__ is called. + cache = instance.__dict__ + value = instance._raw.get(self.name) + + # To make the _process_* methods easier, we'll check if the value is None + # and if this field is NOT a required attribute, and if both of those + # things are true, we'll skip the the converter. 
This will mean that the + # converters never have to deal with the None union. + if self.name in _REQUIRED_ATTRS or value is not None: + try: + converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}") + except AttributeError: + pass + else: + value = converter(value) + + cache[self.name] = value + try: + del instance._raw[self.name] # type: ignore[misc] + except KeyError: + pass + + return cast(T, value) + + def _invalid_metadata( + self, msg: str, cause: Optional[Exception] = None + ) -> InvalidMetadata: + exc = InvalidMetadata( + self.raw_name, msg.format_map({"field": repr(self.raw_name)}) + ) + exc.__cause__ = cause + return exc + + def _process_metadata_version(self, value: str) -> _MetadataVersion: + # Implicitly makes Metadata-Version required. + if value not in _VALID_METADATA_VERSIONS: + raise self._invalid_metadata(f"{value!r} is not a valid metadata version") + return cast(_MetadataVersion, value) + + def _process_name(self, value: str) -> str: + if not value: + raise self._invalid_metadata("{field} is a required field") + # Validate the name as a side-effect. + try: + utils.canonicalize_name(value, validate=True) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + else: + return value + + def _process_version(self, value: str) -> version_module.Version: + if not value: + raise self._invalid_metadata("{field} is a required field") + try: + return version_module.parse(value) + except version_module.InvalidVersion as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_summary(self, value: str) -> str: + """Check the field contains no newlines.""" + if "\n" in value: + raise self._invalid_metadata("{field} must be a single line") + return value + + def _process_description_content_type(self, value: str) -> str: + content_types = {"text/plain", "text/x-rst", "text/markdown"} + message = email.message.EmailMessage() + message["content-type"] = value + + content_type, parameters = ( + # Defaults to `text/plain` if parsing failed. + message.get_content_type().lower(), + message["content-type"].params, + ) + # Check if content-type is valid or defaulted to `text/plain` and thus was + # not parseable. + if content_type not in content_types or content_type not in value.lower(): + raise self._invalid_metadata( + f"{{field}} must be one of {list(content_types)}, not {value!r}" + ) + + charset = parameters.get("charset", "UTF-8") + if charset != "UTF-8": + raise self._invalid_metadata( + f"{{field}} can only specify the UTF-8 charset, not {list(charset)}" + ) + + markdown_variants = {"GFM", "CommonMark"} + variant = parameters.get("variant", "GFM") # Use an acceptable default. 
+ if content_type == "text/markdown" and variant not in markdown_variants: + raise self._invalid_metadata( + f"valid Markdown variants for {{field}} are {list(markdown_variants)}, " + f"not {variant!r}", + ) + return value + + def _process_dynamic(self, value: List[str]) -> List[str]: + for dynamic_field in map(str.lower, value): + if dynamic_field in {"name", "version", "metadata-version"}: + raise self._invalid_metadata( + f"{value!r} is not allowed as a dynamic field" + ) + elif dynamic_field not in _EMAIL_TO_RAW_MAPPING: + raise self._invalid_metadata(f"{value!r} is not a valid dynamic field") + return list(map(str.lower, value)) + + def _process_provides_extra( + self, + value: List[str], + ) -> List[utils.NormalizedName]: + normalized_names = [] + try: + for name in value: + normalized_names.append(utils.canonicalize_name(name, validate=True)) + except utils.InvalidName as exc: + raise self._invalid_metadata( + f"{name!r} is invalid for {{field}}", cause=exc + ) + else: + return normalized_names + + def _process_requires_python(self, value: str) -> specifiers.SpecifierSet: + try: + return specifiers.SpecifierSet(value) + except specifiers.InvalidSpecifier as exc: + raise self._invalid_metadata( + f"{value!r} is invalid for {{field}}", cause=exc + ) + + def _process_requires_dist( + self, + value: List[str], + ) -> List[requirements.Requirement]: + reqs = [] + try: + for req in value: + reqs.append(requirements.Requirement(req)) + except requirements.InvalidRequirement as exc: + raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc) + else: + return reqs + + +class Metadata: + """Representation of distribution metadata. + + Compared to :class:`RawMetadata`, this class provides objects representing + metadata fields instead of only using built-in types. Any invalid metadata + will cause :exc:`InvalidMetadata` to be raised (with a + :py:attr:`~BaseException.__cause__` attribute as appropriate). + """ + + _raw: RawMetadata + + @classmethod + def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> "Metadata": + """Create an instance from :class:`RawMetadata`. + + If *validate* is true, all metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + ins = cls() + ins._raw = data.copy() # Mutations occur due to caching enriched values. + + if validate: + exceptions: List[Exception] = [] + try: + metadata_version = ins.metadata_version + metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) + except InvalidMetadata as metadata_version_exc: + exceptions.append(metadata_version_exc) + metadata_version = None + + # Make sure to check for the fields that are present, the required + # fields (so their absence can be reported). + fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS + # Remove fields that have already been checked. + fields_to_check -= {"metadata_version"} + + for key in fields_to_check: + try: + if metadata_version: + # Can't use getattr() as that triggers descriptor protocol which + # will fail due to no value for the instance argument. 
+ try: + field_metadata_version = cls.__dict__[key].added + except KeyError: + exc = InvalidMetadata(key, f"unrecognized field: {key!r}") + exceptions.append(exc) + continue + field_age = _VALID_METADATA_VERSIONS.index( + field_metadata_version + ) + if field_age > metadata_age: + field = _RAW_TO_EMAIL_MAPPING[key] + exc = InvalidMetadata( + field, + "{field} introduced in metadata version " + "{field_metadata_version}, not {metadata_version}", + ) + exceptions.append(exc) + continue + getattr(ins, key) + except InvalidMetadata as exc: + exceptions.append(exc) + + if exceptions: + raise ExceptionGroup("invalid metadata", exceptions) + + return ins + + @classmethod + def from_email( + cls, data: Union[bytes, str], *, validate: bool = True + ) -> "Metadata": + """Parse metadata from email headers. + + If *validate* is true, the metadata will be validated. All exceptions + related to validation will be gathered and raised as an :class:`ExceptionGroup`. + """ + raw, unparsed = parse_email(data) + + if validate: + exceptions: list[Exception] = [] + for unparsed_key in unparsed: + if unparsed_key in _EMAIL_TO_RAW_MAPPING: + message = f"{unparsed_key!r} has invalid data" + else: + message = f"unrecognized field: {unparsed_key!r}" + exceptions.append(InvalidMetadata(unparsed_key, message)) + + if exceptions: + raise ExceptionGroup("unparsed", exceptions) + + try: + return cls.from_raw(raw, validate=validate) + except ExceptionGroup as exc_group: + raise ExceptionGroup( + "invalid or unparsed metadata", exc_group.exceptions + ) from None + + metadata_version: _Validator[_MetadataVersion] = _Validator() + """:external:ref:`core-metadata-metadata-version` + (required; validated to be a valid metadata version)""" + name: _Validator[str] = _Validator() + """:external:ref:`core-metadata-name` + (required; validated using :func:`~packaging.utils.canonicalize_name` and its + *validate* parameter)""" + version: _Validator[version_module.Version] = _Validator() + """:external:ref:`core-metadata-version` (required)""" + dynamic: _Validator[Optional[List[str]]] = _Validator( + added="2.2", + ) + """:external:ref:`core-metadata-dynamic` + (validated against core metadata field names and lowercased)""" + platforms: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-platform`""" + supported_platforms: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-supported-platform`""" + summary: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" + description: _Validator[Optional[str]] = _Validator() # TODO 2.1: can be in body + """:external:ref:`core-metadata-description`""" + description_content_type: _Validator[Optional[str]] = _Validator(added="2.1") + """:external:ref:`core-metadata-description-content-type` (validated)""" + keywords: _Validator[Optional[List[str]]] = _Validator() + """:external:ref:`core-metadata-keywords`""" + home_page: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-home-page`""" + download_url: _Validator[Optional[str]] = _Validator(added="1.1") + """:external:ref:`core-metadata-download-url`""" + author: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author`""" + author_email: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-author-email`""" + maintainer: _Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer`""" + maintainer_email: 
_Validator[Optional[str]] = _Validator(added="1.2") + """:external:ref:`core-metadata-maintainer-email`""" + license: _Validator[Optional[str]] = _Validator() + """:external:ref:`core-metadata-license`""" + classifiers: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """:external:ref:`core-metadata-classifier`""" + requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-dist`""" + requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator( + added="1.2" + ) + """:external:ref:`core-metadata-requires-python`""" + # Because `Requires-External` allows for non-PEP 440 version specifiers, we + # don't do any processing on the values. + requires_external: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-requires-external`""" + project_urls: _Validator[Optional[Dict[str, str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-project-url`""" + # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation + # regardless of metadata version. + provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator( + added="2.1", + ) + """:external:ref:`core-metadata-provides-extra`""" + provides_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-provides-dist`""" + obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added="1.2") + """:external:ref:`core-metadata-obsoletes-dist`""" + requires: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Requires`` (deprecated)""" + provides: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Provides`` (deprecated)""" + obsoletes: _Validator[Optional[List[str]]] = _Validator(added="1.1") + """``Obsoletes`` (deprecated)""" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/requirements.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/requirements.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc43a7e98d87dba0c2069bfb4554f71d228cad4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/requirements.py @@ -0,0 +1,90 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from typing import Any, Iterator, Optional, Set + +from ._parser import parse_requirement as _parse_requirement +from ._tokenizer import ParserSyntaxError +from .markers import Marker, _normalize_extra_values +from .specifiers import SpecifierSet +from .utils import canonicalize_name + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? + # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? 
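With the Metadata class above complete, here is a minimal sketch of the two entry points it offers, using an invented METADATA payload (every field value below is made up):

# Sketch: lenient vs. strict parsing with the classes defined above.
from packaging.metadata import Metadata, parse_email

source = (
    "Metadata-Version: 2.1\n"
    "Name: example-dist\n"
    "Version: 1.0.post1\n"
    "Requires-Python: >=3.8\n"
)

raw, unparsed = parse_email(source)  # lenient: plain builtins, nothing validated
assert unparsed == {} and raw["name"] == "example-dist"

meta = Metadata.from_email(source)   # strict: validated, enriched objects
assert str(meta.version) == "1.0.post1"
assert meta.requires_python.contains("3.9")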
+ + def __init__(self, requirement_string: str) -> None: + try: + parsed = _parse_requirement(requirement_string) + except ParserSyntaxError as e: + raise InvalidRequirement(str(e)) from e + + self.name: str = parsed.name + self.url: Optional[str] = parsed.url or None + self.extras: Set[str] = set(parsed.extras or []) + self.specifier: SpecifierSet = SpecifierSet(parsed.specifier) + self.marker: Optional[Marker] = None + if parsed.marker is not None: + self.marker = Marker.__new__(Marker) + self.marker._markers = _normalize_extra_values(parsed.marker) + + def _iter_parts(self, name: str) -> Iterator[str]: + yield name + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + yield f"[{formatted_extras}]" + + if self.specifier: + yield str(self.specifier) + + if self.url: + yield f"@ {self.url}" + if self.marker: + yield " " + + if self.marker: + yield f"; {self.marker}" + + def __str__(self) -> str: + return "".join(self._iter_parts(self.name)) + + def __repr__(self) -> str: + return f"<Requirement('{self}')>" + + def __hash__(self) -> int: + return hash( + ( + self.__class__.__name__, + *self._iter_parts(canonicalize_name(self.name)), + ) + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, Requirement): + return NotImplemented + + return ( + canonicalize_name(self.name) == canonicalize_name(other.name) + and self.extras == other.extras + and self.specifier == other.specifier + and self.url == other.url + and self.marker == other.marker + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/specifiers.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/specifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..2d015bab5958fd9767cf5c9e449f2fa33292c962 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/specifiers.py @@ -0,0 +1,1017 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +""" +.. testsetup:: + + from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier + from packaging.version import Version +""" + +import abc +import itertools +import re +from typing import Callable, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union + +from .utils import canonicalize_version +from .version import Version + +UnparsedVersion = Union[Version, str] +UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion) +CallableOperator = Callable[[Version, str], bool] + + +def _coerce_version(version: UnparsedVersion) -> Version: + if not isinstance(version, Version): + version = Version(version) + return version + + +class InvalidSpecifier(ValueError): + """ + Raised when attempting to create a :class:`Specifier` with a specifier + string that is invalid. + + >>> Specifier("lolwat") + Traceback (most recent call last): + ... + packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat' + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier-like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier-like object.
+ """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier-like + objects are equal. + + :param other: The other object to check against. + """ + + @property + @abc.abstractmethod + def prereleases(self) -> Optional[bool]: + """Whether or not pre-releases as a whole are allowed. + + This can be set to either ``True`` or ``False`` to explicitly enable or disable + prereleases or it can be set to ``None`` (the default) to use default semantics. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """Setter for :attr:`prereleases`. + + :param value: The value to set. + """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None + ) -> Iterator[UnparsedVersionVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class Specifier(BaseSpecifier): + """This class abstracts handling of version specifiers. + + .. tip:: + + It is generally not required to instantiate this manually. You should instead + prefer to work with :class:`SpecifierSet` instead, which can parse + comma-separated version specifiers (which is what package metadata contains). + """ + + _operator_regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + """ + _version_regex_str = r""" + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s;)]* # The arbitrary version can be just about anything, + # we match everything except for whitespace, a + # semi-colon for marker support, and a closing paren + # since versions can be enclosed in them. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + + # You cannot use a wild card and a pre-release, post-release, a dev or + # local version together so group them with a | and make them optional. + (?: + \.\* # Wild card syntax of .* + | + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (alpha|beta|preview|pre|a|b|c|rc) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. 
Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + """Initialize a Specifier instance. + + :param spec: + The string representation of a specifier which will be parsed and + normalized before use. + :param prereleases: + This tells the specifier if it should accept prerelease versions if + applicable or not. The default of ``None`` will autodetect it from the + given specifiers. + :raises InvalidSpecifier: + If the given specifier is invalid (i.e. bad syntax). + """ + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515 + @property # type: ignore[override] + def prereleases(self) -> bool: + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if Version(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + @property + def operator(self) -> str: + """The operator of this specifier. + + >>> Specifier("==1.2.3").operator + '==' + """ + return self._spec[0] + + @property + def version(self) -> str: + """The version of this specifier. + + >>> Specifier("==1.2.3").version + '1.2.3' + """ + return self._spec[1] + + def __repr__(self) -> str: + """A representation of the Specifier that shows all internal state. + + >>> Specifier('>=1.0.0') + =1.0.0')> + >>> Specifier('>=1.0.0', prereleases=False) + =1.0.0', prereleases=False)> + >>> Specifier('>=1.0.0', prereleases=True) + =1.0.0', prereleases=True)> + """ + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + """A string representation of the Specifier that can be round-tripped. + + >>> str(Specifier('>=1.0.0')) + '>=1.0.0' + >>> str(Specifier('>=1.0.0', prereleases=False)) + '>=1.0.0' + """ + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + canonical_version = canonicalize_version( + self._spec[1], + strip_trailing_zero=(self._spec[0] != "~="), + ) + return self._spec[0], canonical_version + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + """Whether or not the two Specifier-like objects are equal. + + :param other: The other object to check against. + + The value of :attr:`prereleases` is ignored. 
+
+        >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
+        True
+        >>> (Specifier("==1.2.3", prereleases=False) ==
+        ...  Specifier("==1.2.3", prereleases=True))
+        True
+        >>> Specifier("==1.2.3") == "==1.2.3"
+        True
+        >>> Specifier("==1.2.3") == Specifier("==1.2.4")
+        False
+        >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
+        False
+        """
+        if isinstance(other, str):
+            try:
+                other = self.__class__(str(other))
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._canonical_spec == other._canonical_spec
+
+    def _get_operator(self, op: str) -> CallableOperator:
+        operator_callable: CallableOperator = getattr(
+            self, f"_compare_{self._operators[op]}"
+        )
+        return operator_callable
+
+    def _compare_compatible(self, prospective: Version, spec: str) -> bool:
+
+        # Compatible releases have an equivalent combination of >= and ==. That
+        # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore suffix segments.
+        prefix = _version_join(
+            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+            prospective, prefix
+        )
+
+    def _compare_equal(self, prospective: Version, spec: str) -> bool:
+
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            normalized_prospective = canonicalize_version(
+                prospective.public, strip_trailing_zero=False
+            )
+            # Get the normalized version string ignoring the trailing .*
+            normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
+            # Split the spec out by bangs and dots, and pretend that there is
+            # an implicit dot in between a release segment and a pre-release segment.
+            split_spec = _version_split(normalized_spec)
+
+            # Split the prospective version out by bangs and dots, and pretend
+            # that there is an implicit dot in between a release segment and
+            # a pre-release segment.
+            split_prospective = _version_split(normalized_prospective)
+
+            # 0-pad the prospective version before shortening it to get the correct
+            # shortened version.
+            padded_prospective, _ = _pad_version(split_prospective, split_spec)
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            shortened_prospective = padded_prospective[: len(split_spec)]
+
+            return shortened_prospective == split_spec
+        else:
+            # Convert our spec string into a Version
+            spec_version = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec_version.local:
+                prospective = Version(prospective.public)
+
+            return prospective == spec_version
+
+    def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
+        return not self._compare_equal(prospective, spec)
+
+    def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) <= Version(spec)
+
+    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
+
+        # NB: Local version identifiers are NOT permitted in the version
+        # specifier, so local version labels can be universally removed from
+        # the prospective version.
+        return Version(prospective.public) >= Version(spec)
+
+    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
+
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
+
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec_str)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that the prospective version is both
+        # greater than the spec version *and* it's not a pre-release of the
+        # same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+        return str(prospective).lower() == str(spec).lower()
+
+    def __contains__(self, item: Union[str, Version]) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in Specifier(">=1.2.3")
+        True
+        >>> Version("1.2.3") in Specifier(">=1.2.3")
+        True
+        >>> "1.0.0" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3")
+        False
+        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self, item: UnparsedVersion, prereleases: Optional[bool] = None
+    ) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this Specifier. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> Specifier(">=1.2.3").contains("1.2.3")
+        True
+        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
+        True
+        >>> Specifier(">=1.2.3").contains("1.0.0")
+        False
+        >>> Specifier(">=1.2.3").contains("1.3.0a1")
+        False
+        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
+        True
+        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
+        True
+        """
+
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version, this allows us to have a shortcut for
+        # "2.0" in Specifier(">=2")
+        normalized_item = _coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not, if we do not support prereleases then we can short circuit
+        # logic if this version is a prerelease.
+        if normalized_item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        operator_callable: CallableOperator = self._get_operator(self.operator)
+        return operator_callable(normalized_item, self.version)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifier.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(Specifier().contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
+        ['1.2.3', '1.3', <Version('1.4')>]
+        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
+        ['1.5a1']
+        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        """
+
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = _coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+    """Split version into components.
+
+    The split components are intended for version comparison. The logic does
+    not attempt to retain the original version string, so joining the
+    components back with :func:`_version_join` may not produce the original
+    version string.
+    """
+    result: List[str] = []
+
+    epoch, _, rest = version.rpartition("!")
+    result.append(epoch or "0")
+
+    for item in rest.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _version_join(components: List[str]) -> str:
+    """Join split version components into a version string.
+
+    This function assumes the input came from :func:`_version_split`, where the
+    first component must be the epoch (either empty or numeric), and all other
+    components numeric.
+    """
+    epoch, *rest = components
+    return f"{epoch}!{'.'.join(rest)}"
+
+
+def _is_not_suffix(segment: str) -> bool:
+    return not any(
+        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+    )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (
+        list(itertools.chain.from_iterable(left_split)),
+        list(itertools.chain.from_iterable(right_split)),
+    )
+
+
+class SpecifierSet(BaseSpecifier):
+    """This class abstracts handling of a set of version specifiers.
+
+    It can be passed a single specifier (``>=3.0``), a comma-separated list of
+    specifiers (``>=3.0,!=3.1``), or no specifier at all.
+    """
+
+    def __init__(
+        self, specifiers: str = "", prereleases: Optional[bool] = None
+    ) -> None:
+        """Initialize a SpecifierSet instance.
+
+        :param specifiers:
+            The string representation of a specifier or a comma-separated list of
+            specifiers which will be parsed and normalized before use.
+        :param prereleases:
+            This tells the SpecifierSet if it should accept prerelease versions if
+            applicable or not. The default of ``None`` will autodetect it from the
+            given specifiers.
+
+        :raises InvalidSpecifier:
+            If the given ``specifiers`` are not parseable, then this exception will be
+            raised.
+        """
+
+        # Split on `,` to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Make each individual specifier a Specifier and save in a frozen set for later.
+        self._specs = frozenset(map(Specifier, split_specifiers))
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    @property
+    def prereleases(self) -> Optional[bool]:
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value: bool) -> None:
+        self._prereleases = value
+
+    def __repr__(self) -> str:
+        """A representation of the specifier set that shows all internal state.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> SpecifierSet('>=1.0.0,!=2.0.0')
+        <SpecifierSet('!=2.0.0,>=1.0.0')>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
+        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
+        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
+        """
+        pre = (
+            f", prereleases={self.prereleases!r}"
+            if self._prereleases is not None
+            else ""
+        )
+
+        return f"<SpecifierSet({str(self)!r}{pre})>"
+
+    def __str__(self) -> str:
+        """A string representation of the specifier set that can be round-tripped.
+
+        Note that the ordering of the individual specifiers within the set may not
+        match the input string.
+
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
+        '!=1.0.1,>=1.0.0'
+        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
+        '!=1.0.1,>=1.0.0'
+        """
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self) -> int:
+        return hash(self._specs)
+
+    def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+        """Return a SpecifierSet which is a combination of the two sets.
+
+        :param other: The other object to combine with.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
+        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
+        """
+        if isinstance(other, str):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other: object) -> bool:
+        """Whether or not the two SpecifierSet-like objects are equal.
+
+        :param other: The other object to check against.
+
+        The value of :attr:`prereleases` is ignored.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
+        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
+        False
+        """
+        if isinstance(other, (str, Specifier)):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __len__(self) -> int:
+        """Returns the number of specifiers in this specifier set."""
+        return len(self._specs)
+
+    def __iter__(self) -> Iterator[Specifier]:
+        """
+        Returns an iterator over all the underlying :class:`Specifier` instances
+        in this specifier set.
+
+        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
+        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
+        """
+        return iter(self._specs)
+
+    def __contains__(self, item: UnparsedVersion) -> bool:
+        """Return whether or not the item is contained in this specifier.
+
+        :param item: The item to check for.
+
+        This is used for the ``in`` operator and behaves the same as
+        :meth:`contains` with no ``prereleases`` argument passed.
+
+        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
+        True
+        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
+        False
+        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
+        True
+        """
+        return self.contains(item)
+
+    def contains(
+        self,
+        item: UnparsedVersion,
+        prereleases: Optional[bool] = None,
+        installed: Optional[bool] = None,
+    ) -> bool:
+        """Return whether or not the item is contained in this SpecifierSet.
+
+        :param item:
+            The item to check for, which can be a version string or a
+            :class:`Version` instance.
+        :param prereleases:
+            Whether or not to match prereleases with this SpecifierSet. If set to
+            ``None`` (the default), it uses :attr:`prereleases` to determine
+            whether or not prereleases are allowed.
+
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
+        False
+        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
+        True
+        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
+        True
+        """
+        # Ensure that our item is a Version instance.
+        if not isinstance(item, Version):
+            item = Version(item)
+
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        if installed and item.is_prerelease:
+            item = Version(item.base_version)
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        # will always return True, this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(
+        self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
+    ) -> Iterator[UnparsedVersionVar]:
+        """Filter items in the given iterable, that match the specifiers in this set.
+
+        :param iterable:
+            An iterable that can contain version strings and :class:`Version` instances.
+            The items in the iterable will be filtered according to the specifier.
+        :param prereleases:
+            Whether or not to allow prereleases in the returned iterator. If set to
+            ``None`` (the default), it will intelligently decide whether to allow
+            prereleases or not (based on the :attr:`prereleases` attribute, and
+            whether the only versions matching are prereleases).
+
+        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
+        because it implements the rule from :pep:`440` that a prerelease item
+        SHOULD be accepted if no other versions match the given specifier.
+
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
+        ['1.3', <Version('1.4')>]
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
+        []
+        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+
+        An "empty" SpecifierSet will filter items based on the presence of prerelease
+        versions in the set.
+
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
+        ['1.3']
+        >>> list(SpecifierSet("").filter(["1.5a1"]))
+        ['1.5a1']
+        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
+        ['1.3', '1.5a1']
+        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
+        ['1.3', '1.5a1']
+        """
+        # Determine if we're forcing a prerelease or not, if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iter(iterable)
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases.
+        else:
+            filtered: List[UnparsedVersionVar] = []
+            found_prereleases: List[UnparsedVersionVar] = []
+
+            for item in iterable:
+                parsed_version = _coerce_version(item)
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return iter(found_prereleases)
+
+            return iter(filtered)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..5faab9bd0dcf28847960162b2b4f13a8a556ef20
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/packaging/version.py
@@ -0,0 +1,563 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
+
+import itertools
+import re
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: Tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
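+
+# Illustrative examples of the normalization above (editor's note, not part of
+# upstream packaging):
+#     _parse_letter_version("alpha", None)  -> ("a", 0)
+#     _parse_letter_version("rev", "2")     -> ("post", 2)
+#     _parse_letter_version(None, "1")      -> ("post", 1)  # implicit, e.g. "1.0-1"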
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
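+
+# e.g. (editor's illustration, not part of upstream packaging): numeric parts
+# become ints, so _parse_local_version("ubuntu-16.04") -> ("ubuntu", 16, 4)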
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then take the rest, re-reverse
+    # it back into the correct order, and make it a tuple to use as our sorting
+    # key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
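+
+    # For example (editor's note, not part of upstream packaging): releases
+    # (1, 0, 0) and (1,) both reduce to (1,), which is why
+    # Version("1.0.0") == Version("1") is True.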
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
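+
+# A quick ordering sketch (editor's illustration, not part of upstream
+# packaging), showing the dev/pre/post rules implemented above:
+#     sorted(Version(v) for v in ["1.0", "1.0.post1", "1.0a1", "1.0.dev0"])
+#     -> [<Version('1.0.dev0')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0.post1')>]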
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyOpenSSL-20.0.1.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyOpenSSL-20.0.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..f79e4cb9aaf0b2d9e8ba78861e2071317b2384b3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyOpenSSL-20.0.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+conda
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/AUTHORS.md b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/AUTHORS.md
new file mode 100644
index 0000000000000000000000000000000000000000..d6b5ee01f9e57c3aa5b6474d2aae8024e5bf4c79
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/AUTHORS.md
@@ -0,0 +1,110 @@
+## Authors
+
+This project was started and continues to be led by Brian E. Granger
+(ellisonbg AT gmail DOT com). Min Ragan-Kelley (benjaminrk AT gmail DOT
+com) is the primary developer of pyzmq at this time.
+
+The following people have contributed to the project:
+
+- Alexander Else (alexander DOT else AT team DOT telstra DOT com)
+- Alexander Pyhalov (apyhalov AT gmail DOT com)
+- Alexandr Emelin (frvzmb AT gmail DOT com)
+- Amr Ali (amr AT ledgerx DOT com)
+- Andre Caron (andre DOT l DOT caron AT gmail DOT com)
+- Andrea Crotti (andrea DOT crotti DOT 0 AT gmail DOT com)
+- Andrew Gwozdziewycz (git AT apgwoz DOT com)
+- Baptiste Lepilleur (baptiste DOT lepilleur AT gmail DOT com)
+- Brandyn A. White (bwhite AT dappervision DOT com)
+- Brian E. Granger (ellisonbg AT gmail DOT com)
+- Brian Hoffman (hoffman_brian AT bah DOT com)
+- Carlos A. Rocha (carlos DOT rocha AT gmail DOT com)
+- Chris Laws (clawsicus AT gmail DOT com)
+- Christian Wyglendowski (christian AT bu DOT mp)
+- Christoph Gohlke (cgohlke AT uci DOT edu)
+- Curtis (curtis AT tinbrain DOT net)
+- Cyril Holweck (cyril DOT holweck AT free DOT fr)
+- Dan Colish (dcolish AT gmail DOT com)
+- Daniel Lundin (dln AT eintr DOT org)
+- Daniel Truemper (truemped AT googlemail DOT com)
+- Douglas Creager (douglas DOT creager AT redjack DOT com)
+- Eduardo Stalinho (eduardooc DOT 86 AT gmail DOT com)
+- Eren Güven (erenguven0 AT gmail DOT com)
+- Erick Tryzelaar (erick DOT tryzelaar AT gmail DOT com)
+- Erik Tollerud (erik DOT tollerud AT gmail DOT com)
+- FELD Boris (lothiraldan AT gmail DOT com)
+- Fantix King (fantix DOT king AT gmail DOT com)
+- Felipe Cruz (felipecruz AT loogica DOT net)
+- Fernando Perez (Fernando DOT Perez AT berkeley DOT edu)
+- Frank Wiles (frank AT revsys DOT com)
+- Félix-Antoine Fortin (felix DOT antoine DOT fortin AT gmail DOT com)
+- Gavrie Philipson (gavriep AT il DOT ibm DOT com)
+- Godefroid Chapelle (gotcha AT bubblenet DOT be)
+- Greg Banks (gbanks AT mybasis DOT com)
+- Greg Ward (greg AT gerg DOT ca)
+- Guido Goldstein (github AT a-nugget DOT de)
+- Ian Lee (IanLee1521 AT gmail DOT com)
+- Ionuț Arțăriși (ionut AT artarisi DOT eu)
+- Ivo Danihelka (ivo AT danihelka DOT net)
+- Iyed (iyed DOT bennour AT gmail DOT com)
+- Jim Garrison (jim AT garrison DOT cc)
+- John Gallagher (johnkgallagher AT gmail DOT com)
+- Julian Taylor (jtaylor DOT debian AT googlemail DOT com)
+- Justin Bronder (jsbronder AT gmail DOT com)
+- Justin Riley (justin DOT t DOT riley AT gmail DOT com)
+- Marc Abramowitz (marc AT marc-abramowitz DOT com)
+- Matthew Aburn (mattja6 AT gmail DOT com)
+- Michel Pelletier (pelletier DOT michel AT gmail DOT com)
+- Michel Zou (xantares09 AT hotmail DOT com)
+- Min Ragan-Kelley (benjaminrk AT gmail DOT com)
+- Nell Hardcastle (nell AT dev-nell DOT com)
+- Nicholas Pilkington (nicholas DOT pilkington AT gmail DOT com)
+- Nicholas Piël (nicholas AT nichol DOT as)
+- Nick Pellegrino (npellegrino AT mozilla DOT com)
+- Nicolas Delaby (nicolas DOT delaby AT ezeep DOT com)
+- Ondrej Certik (ondrej AT certik DOT cz)
+- Paul Colomiets (paul AT colomiets DOT name)
+- Pawel Jasinski (pawel DOT jasinski AT gmail DOT com)
+- Phus Lu (phus DOT lu AT gmail DOT com)
+- Robert Buchholz (rbu AT goodpoint DOT de)
+- Robert Jordens (jordens AT gmail DOT com)
+- Ryan Cox (ryan DOT a DOT cox AT gmail DOT com)
+- Ryan Kelly (ryan AT rfk DOT id DOT au)
+- Scott Maxwell (scott AT codecobblers DOT com)
+- Scott Sadler (github AT mashi DOT org)
+- Simon Knight (simon DOT knight AT gmail DOT com)
+- Stefan Friesel (sf AT cloudcontrol DOT de)
+- Stefan van der Walt (stefan AT sun DOT ac DOT za)
+- Stephen Diehl (stephen DOT m DOT diehl AT gmail DOT com)
+- Sylvain Corlay (scorlay AT bloomberg DOT net)
+- Thomas Kluyver (takowl AT gmail DOT com)
+- Thomas Spura (tomspur AT fedoraproject DOT org)
+- Tigger Bear (Tigger AT Tiggers-Mac-mini DOT local)
+- Torsten Landschoff (torsten DOT landschoff AT dynamore DOT de)
+- Vadim Markovtsev (v DOT markovtsev AT samsung DOT com)
+- Yannick Hold (yannickhold AT gmail DOT com)
+- Zbigniew Jędrzejewski-Szmek (zbyszek AT in DOT waw DOT pl)
+- hugo  shi (hugoshi AT bleb2 DOT (none))
+- jdgleeson (jdgleeson AT mac DOT com)
+- kyledj (kyle AT bucebuce DOT com)
+- spez (steve AT hipmunk DOT com)
+- stu (stuart DOT axon AT jpcreative DOT co DOT uk)
+- xantares (xantares AT fujitsu-l64 DOT (none))
+
+as reported by:
+
+    git log --all --format='- %aN (%aE)' | sort -u | sed 's/@/ AT /1' | sed -e 's/\.\([^ ]\)/ DOT \1/g'
+
+with some adjustments.
+
+### Not in git log
+
+- Brandon Craig-Rhodes (brandon AT rhodesmill DOT org)
+- Eugene Chernyshov (chernyshov DOT eugene AT gmail DOT com)
+- Craig Austin (craig DOT austin AT gmail DOT com)
+
+### gevent\_zeromq, now zmq.green
+
+- Travis Cline (travis DOT cline AT gmail DOT com)
+- Ryan Kelly (ryan AT rfk DOT id DOT au)
+- Zachary Voase (z AT zacharyvoase DOT com)
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.BSD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.BSD
new file mode 100644
index 0000000000000000000000000000000000000000..a0a3790ba84c792e1d7b64a32d53b1a22fc9e5ae
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.BSD
@@ -0,0 +1,32 @@
+PyZMQ is licensed under the terms of the Modified BSD License (also known as
+New or Revised BSD), as follows:
+
+Copyright (c) 2009-2012, Brian Granger, Min Ragan-Kelley
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of PyZMQ nor the names of its contributors may be used to
+endorse or promote products derived from this software without specific prior
+written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.LESSER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.LESSER
new file mode 100644
index 0000000000000000000000000000000000000000..911c54570e55d5a6cf6234dee57e2441cc07b002
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/COPYING.LESSER
@@ -0,0 +1,181 @@
+		   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
+
+--------------------------------------------------------------------------------
+
+           SPECIAL EXCEPTION GRANTED BY COPYRIGHT HOLDERS
+
+As a special exception, copyright holders give you permission to link this
+library with independent modules to produce an executable, regardless of
+the license terms of these independent modules, and to copy and distribute
+the resulting executable under terms of your choice, provided that you also
+meet, for each linked independent module, the terms and conditions of
+the license of that module. An independent module is a module which is not
+derived from or based on this library. If you modify this library, you must
+extend this exception to your version of the library.
+
+Note: this exception relieves you of any obligations under sections 4 and 5
+of this license, and section 6 of the GNU General Public License.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..c42077f530c29c0b756d86cbb96b23417ec3deba
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/METADATA
@@ -0,0 +1,127 @@
+Metadata-Version: 2.1
+Name: pyzmq
+Version: 19.0.2
+Summary: Python bindings for 0MQ
+Home-page: https://pyzmq.readthedocs.org
+Author: Brian E. Granger, Min Ragan-Kelley
+Author-email: zeromq-dev@lists.zeromq.org
+License: LGPL+BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Topic :: System :: Networking
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*
+Description-Content-Type: text/markdown
+
+# PyZMQ: Python bindings for ØMQ
+
+[![Build Status](https://travis-ci.org/zeromq/pyzmq.svg?branch=master)](https://travis-ci.org/zeromq/pyzmq)
+
+[![Windows Build status](https://ci.appveyor.com/api/projects/status/ugoid0r2fnq8sr56/branch/master?svg=true)](https://ci.appveyor.com/project/minrk/pyzmq/branch/master)
+
+This package contains Python bindings for [ØMQ](http://www.zeromq.org).
+ØMQ is a lightweight and fast messaging implementation.
+
+PyZMQ should work with any reasonable version of Python (≥ 3.4),
+as well as Python 2.7 and 3.3, and with PyPy.
+The Cython backend used by CPython supports libzmq ≥ 2.1.4 (including 3.2.x and 4.x),
+but the CFFI backend used by PyPy only supports libzmq ≥ 3.2.2 (including 4.x).
+
+For a summary of changes to pyzmq, see our
+[changelog](https://pyzmq.readthedocs.org/en/latest/changelog.html).
+
+### ØMQ 3.x, 4.x
+
+PyZMQ fully supports the 3.x and 4.x APIs of libzmq,
+developed at [zeromq/libzmq](https://github.com/zeromq/libzmq).
+No code to change, no flags to pass,
+just build pyzmq against the latest and it should work.
+
+PyZMQ does not support the old libzmq 2 API on PyPy.
+
+## Documentation
+
+See PyZMQ's Sphinx-generated
+[documentation](https://zeromq.github.io/pyzmq) on GitHub for API
+details, and some notes on Python and Cython development. If you want to
+learn about using ØMQ in general, the excellent [ØMQ
+Guide](http://zguide.zeromq.org/py:all) is the place to start, which has a
+Python version of every example. We also have some information on our
+[wiki](https://github.com/zeromq/pyzmq/wiki).
+
+## Downloading
+
+Unless you specifically want to develop PyZMQ, we recommend downloading
+the PyZMQ source code or wheels from
+[PyPI](https://pypi.io/project/pyzmq),
+or install with conda.
+
+You can also get the latest source code from our GitHub repository, but
+building from the repository will require that you install recent Cython.
+
+## Building and installation
+
+For more detail on building pyzmq, see [our Wiki](https://github.com/zeromq/pyzmq/wiki/Building-and-Installing-PyZMQ).
+
+We build wheels for OS X, Windows, and Linux, so you can get a binary on those platforms with:
+
+    pip install pyzmq
+
+but compiling from source with `pip install pyzmq` should work in most environments.
+Especially on OS X, make sure you are using the latest pip (≥ 8), or it may not find the right wheels.
+
+If the wheel doesn't work for some reason, or you want to force pyzmq to be compiled
+(this is often preferable if you already have libzmq installed and configured the way you want it),
+you can force installation with:
+
+    pip install --no-binary=:all: pyzmq
+
+When compiling pyzmq (e.g. installing with pip on Linux),
+it is generally recommended that zeromq be installed separately,
+via homebrew, apt, yum, etc:
+
+    # Debian-based
+    sudo apt-get install libzmq3-dev
+
+    # RHEL-based
+    sudo yum install libzmq3-devel
+
+If this is not available, pyzmq will *try* to build libzmq as a Python Extension,
+though this is not guaranteed to work.
+
+Building pyzmq from the git repo (including release tags on GitHub) requires Cython.
+
+## Old versions
+
+
+pyzmq 16 drops support for Python 2.6 and 3.2.
+If you need to use one of those Python versions, you can pin your pyzmq version to before 16:
+
+    pip install 'pyzmq<16'
+
+For libzmq 2.0.x, use `pyzmq<2.1`.
+
+pyzmq-2.1.11 was the last version of pyzmq to support Python 2.5,
+and pyzmq ≥ 2.2.0 requires Python ≥ 2.6.
+pyzmq-13.0.0 introduces PyPy support via CFFI, which only supports libzmq-3.2.2 and newer.
+
+PyZMQ releases ≤ 2.2.0 matched libzmq versioning, but this is no longer the case,
+starting with PyZMQ 13.0.0 (it was the thirteenth release, so why not?).
+PyZMQ ≥ 13.0 follows semantic versioning conventions accounting only for PyZMQ itself.
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..ccfc869cad39d82cf612b4a454572f7dcb173600
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/RECORD
@@ -0,0 +1,277 @@
+pyzmq-19.0.2.dist-info/AUTHORS.md,sha256=r2SIkOZNtlHm6oKoQ5zvXrS0zoWqZntvQcaREZcCDuE,4809
+pyzmq-19.0.2.dist-info/COPYING.BSD,sha256=rvO4BXA1HUTinCLQgNTp4QazTz_bxc35Y2mUR0xysaI,1596
+pyzmq-19.0.2.dist-info/COPYING.LESSER,sha256=RLI3Z988D-J_iJgfCbx_UKFpXOxLJCUvpfVfA-drzW0,8496
+pyzmq-19.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pyzmq-19.0.2.dist-info/METADATA,sha256=o247aq8B42GXMzRnfb4NxfqBDwLJz42cBtWqzQqe1sY,4967
+pyzmq-19.0.2.dist-info/RECORD,,
+pyzmq-19.0.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pyzmq-19.0.2.dist-info/WHEEL,sha256=0wGQBSV-BlYX9ESMZyEiLMpXIYwrZGj6McPYyDp_RjA,108
+pyzmq-19.0.2.dist-info/top_level.txt,sha256=6PzrZuGHEZe5WZEFrVur4PtbDQrn20QA2bOkOnxn2ZQ,4
+pyzmq.libs/libsodium-bcf9f097.so.23.3.0,sha256=5ny27vjJLCoibW6-i5Sz4g4i4CPr-C5gb-Nnv8Bqkxw,1196672
+pyzmq.libs/libzmq-1358af2c.so.5.2.2,sha256=Vg6vzeT7Iek4IX5Dntkp1fhRRF83-7Ua70bwBv5ow6w,889528
+zmq/__init__.pxd,sha256=E-HbBi-9PCkQiG8zvkK67KoMjmhnFXDGZwzM9-4nwIw,179
+zmq/__init__.py,sha256=DaKpT1A8O3NbChgW8JTBVBzfCqHj4n-b7mbKOillz7o,2636
+zmq/__pycache__/__init__.cpython-38.pyc,,
+zmq/__pycache__/_future.cpython-38.pyc,,
+zmq/__pycache__/decorators.cpython-38.pyc,,
+zmq/__pycache__/error.cpython-38.pyc,,
+zmq/_future.py,sha256=dD4rbk261cp2T5jL25G7AkOhTCOTtSLQhikr_iDPz9Y,18531
+zmq/asyncio/__init__.py,sha256=XfSwsMiAkhGiQm79FtiD22xsmaLT35tceYJoAb3PIy4,2680
+zmq/asyncio/__pycache__/__init__.cpython-38.pyc,,
+zmq/auth/__init__.py,sha256=FvBzjtCqlssiJGgsATEd-2VHjXkhI0QM1SSG15w-spE,328
+zmq/auth/__pycache__/__init__.cpython-38.pyc,,
+zmq/auth/__pycache__/base.cpython-38.pyc,,
+zmq/auth/__pycache__/certs.cpython-38.pyc,,
+zmq/auth/__pycache__/ioloop.cpython-38.pyc,,
+zmq/auth/__pycache__/thread.cpython-38.pyc,,
+zmq/auth/asyncio/__init__.py,sha256=7aAFkEH2PrAjcIH9dgolwacto1ex-U6BXadtFLJTg2w,1385
+zmq/auth/asyncio/__pycache__/__init__.cpython-38.pyc,,
+zmq/auth/base.py,sha256=dsiXUT4bSkIZFcp2yPdR7kIVcfNkI7IMHsesEAl322g,14847
+zmq/auth/certs.py,sha256=KvB1IJS-J42dSC5X_RttvyTsV0k7FfNSeuYdRunU_DU,4179
+zmq/auth/ioloop.py,sha256=DQm2rrewrvpsyZ29cyexISTtvQvYyKhz90RQ18SUImI,1106
+zmq/auth/thread.py,sha256=vRqEcQquwrmsNoIqwwNrf7F9QWnGXSjrG1Bz67jQ3rs,7602
+zmq/backend/__init__.py,sha256=EhoToqakw_hu8ro73ezAAtjI2c8URvKzReeFhyd3FVA,1296
+zmq/backend/__pycache__/__init__.cpython-38.pyc,,
+zmq/backend/__pycache__/select.cpython-38.pyc,,
+zmq/backend/cffi/__init__.py,sha256=ygw-qMDh88aFfvhY21Bzd__7AKKUzteaH6p0IaSzcWs,616
+zmq/backend/cffi/__pycache__/__init__.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/_cffi.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/_poll.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/constants.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/context.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/devices.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/error.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/message.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/socket.cpython-38.pyc,,
+zmq/backend/cffi/__pycache__/utils.cpython-38.pyc,,
+zmq/backend/cffi/_cdefs.h,sha256=LP-gucODZEO4HKTg0A9BUWV1zYelomnrlbr8Drt3E9M,2216
+zmq/backend/cffi/_cffi.py,sha256=xzHOiLRe_MWhpJAtEb8P2HtcrpPEg6Mc4S2IY9dkm5E,3882
+zmq/backend/cffi/_poll.py,sha256=mo7MYUJrBYhlOQbtQ3G1GUFsJDW3SulfuCsEwGF-5rE,2832
+zmq/backend/cffi/_verify.c,sha256=4mYED2HECTdM_Fwlou92o44hW5X-8QoXRnoRtnDiU6U,105
+zmq/backend/cffi/constants.py,sha256=_Ann2OZQLkDGOGplbSJs2NNExK9_ktgb8eFHNv3HtLU,357
+zmq/backend/cffi/context.py,sha256=XcuQTclCGwF5Y1qRMmMazK8jrUyhAAovkZ5cLsYE1Mo,1916
+zmq/backend/cffi/devices.py,sha256=Pgv3HqB5jzzUrmBNNUs82hZqCqZU0M0CPvjG43HYnKU,1644
+zmq/backend/cffi/error.py,sha256=wmnYd0-6EH0r4frKGBSQfr9HX2xVl72mFu1Ks5AKFrw,353
+zmq/backend/cffi/message.py,sha256=hBbLKpRMRQpPQD8JT387PrgymHh3sGKX8H5J5gPTcCQ,1388
+zmq/backend/cffi/socket.py,sha256=nEX6Ljhnxd0mWcdEKF2RooNssXjtkOTkdL4VTCaayRk,9126
+zmq/backend/cffi/utils.py,sha256=mr6r-Gs4zabqZAn3anddugAO3-cZoewPxE60zbQf8Ns,2167
+zmq/backend/cython/__init__.pxd,sha256=M4fkNEuPrh8xGNovdEdDobNWBbFUp8xF7lFI0JDwTVg,140
+zmq/backend/cython/__init__.py,sha256=PuI1rsIZtpAQc_T6iwqx253bwLnhe-OaHk2NbEgnBGs,731
+zmq/backend/cython/__pycache__/__init__.cpython-38.pyc,,
+zmq/backend/cython/_device.cpython-38-x86_64-linux-gnu.so,sha256=6fTedeMIEvOPA12sNr18XwpjPGHHjxHwuR_CYueUHFU,58416
+zmq/backend/cython/_poll.cpython-38-x86_64-linux-gnu.so,sha256=_mbLwIDVshwYJ4Yn2ridaKRqPEH7WLPsqsMbwXBSsHM,75552
+zmq/backend/cython/_proxy_steerable.cpython-38-x86_64-linux-gnu.so,sha256=Dg4on3gbFLpyFSkr1ShX7nKu3lCtpm5z230MpjHxY-U,54144
+zmq/backend/cython/_version.cpython-38-x86_64-linux-gnu.so,sha256=A-Gh_9gkMK9zLVfYgqSR8Kmuzq6vDe-vRqtbelmrGME,36616
+zmq/backend/cython/checkrc.pxd,sha256=DnQ1smwFGt4tpDj0iXKx5NBSbWq1MFUYaMj6HwzMN74,873
+zmq/backend/cython/constant_enums.pxi,sha256=iNH2gjeVLwqTQTYIDXuU5Dc1gNsHGozG7W0g_FC6EBU,6751
+zmq/backend/cython/constants.cpython-38-x86_64-linux-gnu.so,sha256=KhATd6LPiKqXc74VjfrXOS4qJs_oVN2WZVIlooBBE5w,81160
+zmq/backend/cython/constants.pxi,sha256=PrimwV5KwAgyzlbPKZj4jI7zzTtDGMxFArKIgalzbos,12728
+zmq/backend/cython/context.cpython-38-x86_64-linux-gnu.so,sha256=i5vHcouHLDBmffZXbxzh5fmIWUUJIMgZXRJ8-8E545s,71408
+zmq/backend/cython/context.pxd,sha256=MtD1sgvAtHAHbBTH65zQecF0pEwTfdcDctCkobBRy_I,1409
+zmq/backend/cython/error.cpython-38-x86_64-linux-gnu.so,sha256=Uc8uRiclH34dfEjzgPYS4bC57fSBmZFc4RN1y4pRsXs,40984
+zmq/backend/cython/libzmq.pxd,sha256=m_JRK-tTbDwoLT-XGOm5ee7II8BknPHdGrrVykV2NYU,4400
+zmq/backend/cython/message.cpython-38-x86_64-linux-gnu.so,sha256=wN_rvU1j9dHOU30lKDqv_AnPJP-1mpuWbvCYfJ9CZrU,102064
+zmq/backend/cython/message.pxd,sha256=QcUuGQSJkSVk6mw8DEqchPflO_WhRQEn36hRHQcfqms,2398
+zmq/backend/cython/socket.cpython-38-x86_64-linux-gnu.so,sha256=VP1I18iCyeVJ423j9dUF1Ze5gFVA2LcliiKSNdjm-ec,158880
+zmq/backend/cython/socket.pxd,sha256=aCY_6VvQNDU2Sv8tgAqrAx3gTHcQkt2RSB8C0OQLsyw,2105
+zmq/backend/cython/utils.cpython-38-x86_64-linux-gnu.so,sha256=7ls36qQuOSX9sSC1xWLazOctcvjZpqCWB2vKf4dGtLo,54008
+zmq/backend/select.py,sha256=c4DtcfNcGdAR-ghZhpwkR1P20KvyY4n5Fd6HLZ9TIG0,921
+zmq/decorators.py,sha256=DWtd8urTATbBNClVsrLEbdgHuIsPNo7O24D0bSaReyE,5242
+zmq/devices/__init__.py,sha256=y7Si69vs6Kh4pyquZdv8onxzv2K7LecXkwN1gumtQCo,714
+zmq/devices/__pycache__/__init__.cpython-38.pyc,,
+zmq/devices/__pycache__/basedevice.cpython-38.pyc,,
+zmq/devices/__pycache__/monitoredqueue.cpython-38.pyc,,
+zmq/devices/__pycache__/monitoredqueuedevice.cpython-38.pyc,,
+zmq/devices/__pycache__/proxydevice.cpython-38.pyc,,
+zmq/devices/__pycache__/proxysteerabledevice.cpython-38.pyc,,
+zmq/devices/basedevice.py,sha256=zZkKP3RcXEndbaI2arJaCeBxMWHR3-YhJRD7zZU-y7c,8466
+zmq/devices/monitoredqueue.cpython-38-x86_64-linux-gnu.so,sha256=Zl62_xtI_Smu3mRyq6D8IuHmw3X4PoJErhqKgrUTt-I,71272
+zmq/devices/monitoredqueue.pxd,sha256=heEUShZup1-MmonmHvVeMEfW-TWrqzkhfVZ6vf1sC-o,6385
+zmq/devices/monitoredqueue.py,sha256=wftgLpNdYUcdGeUQR8S5hqKOPidqCs9KXyiEwVkHMd0,1041
+zmq/devices/monitoredqueuedevice.py,sha256=OpD_ca8NDctpUpHMXISzEHXe-f99kh4-AG-Hqh7N1FE,1994
+zmq/devices/proxydevice.py,sha256=QSgXezr3TH6isEI48XdtsPRierAfb_QJ44LphHqYrh8,2889
+zmq/devices/proxysteerabledevice.py,sha256=JZgMUPKdKszKK7QL7hVmOzx414nDoHhjCpdrwH8x4J4,3305
+zmq/error.py,sha256=61GlzZJljRLALkCRuAaXxtXBq7UkSMkRZAjUGP22xWk,5268
+zmq/eventloop/__init__.py,sha256=9xoJ8uaGUM4Oy6l9qc_M8_vyA7ny9kpbMQFPNjHUNaE,109
+zmq/eventloop/__pycache__/__init__.cpython-38.pyc,,
+zmq/eventloop/__pycache__/_deprecated.cpython-38.pyc,,
+zmq/eventloop/__pycache__/future.cpython-38.pyc,,
+zmq/eventloop/__pycache__/ioloop.cpython-38.pyc,,
+zmq/eventloop/__pycache__/zmqstream.cpython-38.pyc,,
+zmq/eventloop/_deprecated.py,sha256=PJENb_FidCenSyZCMBgX6bOylkX9BlhRqGnwHU7ZFTA,6873
+zmq/eventloop/future.py,sha256=HArqtfsPhYseiOFuGHSz-DAccTFcnmxJbbHN_ShlyPo,1848
+zmq/eventloop/ioloop.py,sha256=bZqCaO-2gprf5CPYfynnEs7REtY58yydEFaxNxJK-J4,4135
+zmq/eventloop/minitornado/__init__.py,sha256=uKArT4cNscHMAQUVXS0-kNhd3Q-RLbmDrLkELbrx-jY,341
+zmq/eventloop/minitornado/__pycache__/__init__.cpython-38.pyc,,
+zmq/eventloop/minitornado/__pycache__/concurrent.cpython-38.pyc,,
+zmq/eventloop/minitornado/__pycache__/ioloop.cpython-38.pyc,,
+zmq/eventloop/minitornado/__pycache__/log.cpython-38.pyc,,
+zmq/eventloop/minitornado/__pycache__/stack_context.cpython-38.pyc,,
+zmq/eventloop/minitornado/__pycache__/util.cpython-38.pyc,,
+zmq/eventloop/minitornado/concurrent.py,sha256=ZP0O2x6ZRLC_piK6btjpCDPJQhoexnFG59VJvHljwiM,459
+zmq/eventloop/minitornado/ioloop.py,sha256=ucbYeJ5exHjn3-N_i0q0GEa40ZTP7JMEwNbsw-Um3DU,41351
+zmq/eventloop/minitornado/log.py,sha256=TMT1en_Zp66rWCvmgqP9Eugu1KcBl6T3zcC89wwXvqI,181
+zmq/eventloop/minitornado/platform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zmq/eventloop/minitornado/platform/__pycache__/__init__.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/__pycache__/auto.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/__pycache__/common.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/__pycache__/interface.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/__pycache__/posix.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/__pycache__/windows.cpython-38.pyc,,
+zmq/eventloop/minitornado/platform/auto.py,sha256=En7TgpanNDwP-xLjBigO4zSL49okgyFKmngFd13oM-Q,1424
+zmq/eventloop/minitornado/platform/common.py,sha256=aC0T8Jjin48-o9F6kxucIGbhzlo1IlX466lXD9QuE4M,3327
+zmq/eventloop/minitornado/platform/interface.py,sha256=757vw4dGAlErxWMwTw8h_STcOGjp6Ih4eJenGk8zdEY,2244
+zmq/eventloop/minitornado/platform/posix.py,sha256=_z6-sDzustldrwMB92o3sLqj7Hfl-F8-pJ58CfiAMNk,1844
+zmq/eventloop/minitornado/platform/windows.py,sha256=xonLtnGj7ZSo5q1_try4hkE6rWEmVRU-EbCEmWhgPog,681
+zmq/eventloop/minitornado/stack_context.py,sha256=YjvOJ4ooIoX1VEuBg4Q2Hn882pmtoDvM_GHop04jz9g,13167
+zmq/eventloop/minitornado/util.py,sha256=gSrCzHrvkLfyfQcTA8iBbygkRbOoN6c3NXGJMa61hMU,7433
+zmq/eventloop/zmqstream.py,sha256=3kCQBEQp8ByQOASz8HMSs5d5qnUDvIcS5ssCxTrjoSM,19497
+zmq/green/__init__.py,sha256=1_Z_22-tSkxzQPjaMcBLBz-sUXn26aAyp-3bgVpdk2A,1117
+zmq/green/__pycache__/__init__.cpython-38.pyc,,
+zmq/green/__pycache__/core.cpython-38.pyc,,
+zmq/green/__pycache__/device.cpython-38.pyc,,
+zmq/green/__pycache__/poll.cpython-38.pyc,,
+zmq/green/core.py,sha256=8tdmPVQij32zRirUzI49joC7h3IHCqlJ7TqAozcCDd4,10575
+zmq/green/device.py,sha256=3Si8rxNe1kWX5lU6EWqOZnZnuNWWe54_nZArK-JcHqA,950
+zmq/green/eventloop/__init__.py,sha256=xuZ0H_bLyZ613LHTBmG5pirfMNi2tm2OQRH3mS2KeeA,67
+zmq/green/eventloop/__pycache__/__init__.cpython-38.pyc,,
+zmq/green/eventloop/__pycache__/ioloop.cpython-38.pyc,,
+zmq/green/eventloop/__pycache__/zmqstream.cpython-38.pyc,,
+zmq/green/eventloop/ioloop.py,sha256=u2CCTtfAfdmKyXilAfrqs6W7IJNqSeKlcnqKuIxTTlk,35
+zmq/green/eventloop/zmqstream.py,sha256=o34BVY-9xzX5iZLr8DX7T-Hd9l8XvV0ncW9165G6Spo,308
+zmq/green/poll.py,sha256=nhxR2WwA-EsN_e7syWADZaga2FeRKBRozsO7um1jsuE,2934
+zmq/log/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zmq/log/__main__.py,sha256=8lLhgXE0CuchP-fTsbtKzaNIBvdVMsLXf5QiNO9Y-nw,3948
+zmq/log/__pycache__/__init__.cpython-38.pyc,,
+zmq/log/__pycache__/__main__.cpython-38.pyc,,
+zmq/log/__pycache__/handlers.cpython-38.pyc,,
+zmq/log/handlers.py,sha256=JRLmfLiLV5xu9ThobQEk470L06aqn7qyDqNM_Vfu-XI,5952
+zmq/ssh/__init__.py,sha256=2Wcr18a8hS5Qjwhn1p6SYb6NMDIi7Y48JRXg56iU0fI,29
+zmq/ssh/__pycache__/__init__.cpython-38.pyc,,
+zmq/ssh/__pycache__/forward.cpython-38.pyc,,
+zmq/ssh/__pycache__/tunnel.cpython-38.pyc,,
+zmq/ssh/forward.py,sha256=cZGfpmx6KfDIwIXvRRZMGaBHdM9CAFrdn4vp50lCqwo,3549
+zmq/ssh/tunnel.py,sha256=mBiOINMYrMpO-r5MXA2UWiysNi3hi2R2p3SoUJBs0mg,12729
+zmq/sugar/__init__.py,sha256=DLsblTlA-Y6D3teTyjXWliB_Be-mSj_Wlxvh6WPfZwE,738
+zmq/sugar/__pycache__/__init__.cpython-38.pyc,,
+zmq/sugar/__pycache__/attrsettr.cpython-38.pyc,,
+zmq/sugar/__pycache__/constants.cpython-38.pyc,,
+zmq/sugar/__pycache__/context.cpython-38.pyc,,
+zmq/sugar/__pycache__/frame.cpython-38.pyc,,
+zmq/sugar/__pycache__/poll.cpython-38.pyc,,
+zmq/sugar/__pycache__/socket.cpython-38.pyc,,
+zmq/sugar/__pycache__/stopwatch.cpython-38.pyc,,
+zmq/sugar/__pycache__/tracker.cpython-38.pyc,,
+zmq/sugar/__pycache__/version.cpython-38.pyc,,
+zmq/sugar/attrsettr.py,sha256=406Vt58Wu2Wp_xjmStkducBxQ6UZEJYGkowEG-DqY1c,2133
+zmq/sugar/constants.py,sha256=1AXYCJn9wNNqcchm_L2quDjz6AdTu_4GYgdw3qZwphQ,2455
+zmq/sugar/context.py,sha256=wuekl6-F9fPvjMHFpHgIlLjjn44QZ01zYLL05QTpGig,9076
+zmq/sugar/frame.py,sha256=U6KUFRYlqdiTbNpLUsTvgsxMEU9a6mOV-ond5sH7UsU,2959
+zmq/sugar/poll.py,sha256=1wBWysCIcDp24TMV3Tzuxk-KkqVasIUxRqW2Xd3e5Ic,5578
+zmq/sugar/socket.py,sha256=IUK9HbfK0sEQMxRQnlldfqvHs4MSFWl_0_yv-Z-nT2I,25513
+zmq/sugar/stopwatch.py,sha256=XlcCEhuqnT-Zm34DvUA7zbd1uLYWdZZ_8oP5PaSBcP0,915
+zmq/sugar/tracker.py,sha256=h87u9CFFd57N_2XGaj5V-7oEnxD03E7QCSMUgtki1dE,3761
+zmq/sugar/version.py,sha256=NbotWklI3V3zymXqd5hA1EglQNPfxlKtgfwD2vlqiIM,1231
+zmq/tests/__init__.py,sha256=5J9QTdadpGGLrHnXfKXpQmiYyaPVCbsVl5UYSvd-_0I,6382
+zmq/tests/__pycache__/__init__.cpython-38.pyc,,
+zmq/tests/__pycache__/conftest.cpython-38.pyc,,
+zmq/tests/__pycache__/test_auth.cpython-38.pyc,,
+zmq/tests/__pycache__/test_cffi_backend.cpython-38.pyc,,
+zmq/tests/__pycache__/test_constants.cpython-38.pyc,,
+zmq/tests/__pycache__/test_context.cpython-38.pyc,,
+zmq/tests/__pycache__/test_cython.cpython-38.pyc,,
+zmq/tests/__pycache__/test_decorators.cpython-38.pyc,,
+zmq/tests/__pycache__/test_device.cpython-38.pyc,,
+zmq/tests/__pycache__/test_draft.cpython-38.pyc,,
+zmq/tests/__pycache__/test_error.cpython-38.pyc,,
+zmq/tests/__pycache__/test_etc.cpython-38.pyc,,
+zmq/tests/__pycache__/test_future.cpython-38.pyc,,
+zmq/tests/__pycache__/test_imports.cpython-38.pyc,,
+zmq/tests/__pycache__/test_includes.cpython-38.pyc,,
+zmq/tests/__pycache__/test_ioloop.cpython-38.pyc,,
+zmq/tests/__pycache__/test_log.cpython-38.pyc,,
+zmq/tests/__pycache__/test_message.cpython-38.pyc,,
+zmq/tests/__pycache__/test_monitor.cpython-38.pyc,,
+zmq/tests/__pycache__/test_monqueue.cpython-38.pyc,,
+zmq/tests/__pycache__/test_multipart.cpython-38.pyc,,
+zmq/tests/__pycache__/test_pair.cpython-38.pyc,,
+zmq/tests/__pycache__/test_poll.cpython-38.pyc,,
+zmq/tests/__pycache__/test_proxy_steerable.cpython-38.pyc,,
+zmq/tests/__pycache__/test_pubsub.cpython-38.pyc,,
+zmq/tests/__pycache__/test_reqrep.cpython-38.pyc,,
+zmq/tests/__pycache__/test_retry_eintr.cpython-38.pyc,,
+zmq/tests/__pycache__/test_security.cpython-38.pyc,,
+zmq/tests/__pycache__/test_socket.cpython-38.pyc,,
+zmq/tests/__pycache__/test_ssh.cpython-38.pyc,,
+zmq/tests/__pycache__/test_version.cpython-38.pyc,,
+zmq/tests/__pycache__/test_win32_shim.cpython-38.pyc,,
+zmq/tests/__pycache__/test_z85.cpython-38.pyc,,
+zmq/tests/__pycache__/test_zmqstream.cpython-38.pyc,,
+zmq/tests/asyncio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zmq/tests/asyncio/__pycache__/__init__.cpython-38.pyc,,
+zmq/tests/asyncio/__pycache__/_test_asyncio.cpython-38.pyc,,
+zmq/tests/asyncio/__pycache__/test_asyncio.cpython-38.pyc,,
+zmq/tests/asyncio/_test_asyncio.py,sha256=ODguyW2evAtSLkC4eang8Lp_fJXPAnYJsrI2vUrJFEk,15999
+zmq/tests/asyncio/test_asyncio.py,sha256=-_dOwgezrUY8Ayq_dqfCfxPnCBGF3V43Bvlsmgzg3NM,138
+zmq/tests/conftest.py,sha256=bQcbo4Y-HQRNl8kMQEyb42HGFLviwaGY81GdNjlwf28,366
+zmq/tests/test_auth.py,sha256=YKnsptieWxRIbtMJWSiDLih_Q426shaalXeaynERBv8,20712
+zmq/tests/test_cffi_backend.py,sha256=OXSvPonSj50MlecUvLhE0t7E102FA4mifgqUeWlC54I,9481
+zmq/tests/test_constants.py,sha256=K8jHGHA1DIDVatibd-E11L-p87L3wXtlPhmjAf32XA8,4570
+zmq/tests/test_context.py,sha256=_zIe4gLHhOF0tJXJYVPrTD2jgI-zD9G66oItON0YUTo,12027
+zmq/tests/test_cython.py,sha256=1-HAweKa0j65lZClRxBLutfdJRukqL6ByZHSYH0BGEo,1048
+zmq/tests/test_decorators.py,sha256=8K0OX7Pm7mUw4mSgj8ZiCzshSP8xzqPCD5dsM-3BBY0,9495
+zmq/tests/test_device.py,sha256=C7aKdgLU_8jdHArVN62p3A83veqy6qlcRMB_nEDY0ho,6169
+zmq/tests/test_draft.py,sha256=I4sqOBRXC3_hhpPjqvUKzfZUh718Nd9Euw5ldQaPM20,1471
+zmq/tests/test_error.py,sha256=BGMQxDGE6Cs-dB0b-1j6IW_6M4tLTbmY3QXISMPq5Mk,1260
+zmq/tests/test_etc.py,sha256=tpLqhJPmgs1xwfC22px9B6K6bh7VKnpYKy-ICQdF9t8,525
+zmq/tests/test_future.py,sha256=dLo5bbTr3QJpm8K2tvAIHSQTSwnWbz03t3xxJjUtP8Q,11159
+zmq/tests/test_imports.py,sha256=H_TscumRXHCOLmFehCYqzhallTN-V7aktRuzkUTGcZI,1805
+zmq/tests/test_includes.py,sha256=mN4xS_3yTgxcqKHmnGFyNPhDCxWQLEy2bbMXIfJu5Oc,1013
+zmq/tests/test_ioloop.py,sha256=Ac-7gPhX6bMy9xU-NhNpQAbDHoZIOW_kFzRD2MuJRPU,3965
+zmq/tests/test_log.py,sha256=W90JvNIjd61acgRD_sQzFcNNA4lJlO6-j60FkP-ZgZQ,6697
+zmq/tests/test_message.py,sha256=0igIrWc2h1CqxPgDQo6dc1lEzdfASIF2jwnietzpnno,11091
+zmq/tests/test_monitor.py,sha256=f9GGdJK7_KaKbhF0nmoL9Cbbb9dLdp4VlzA2xGefrb4,3044
+zmq/tests/test_monqueue.py,sha256=uwhxRagBwz16ANQOzDiNhSCvrUFtZyxSdaFsCBdrxXA,8223
+zmq/tests/test_multipart.py,sha256=HVHr9_0W5Nywmul6hro4U2udz6t2-AyC6hk4NMHU3Ic,944
+zmq/tests/test_pair.py,sha256=7qBWNFwcnFqucxhuDKjnxkV2I0BSzN-_2_6Pjrd1mZs,1260
+zmq/tests/test_poll.py,sha256=rLqsoyhscuT7no1HnbYtZNE26J5h3WQObBmRq5od6eo,7260
+zmq/tests/test_proxy_steerable.py,sha256=CRYjx4r82tnE3HEKHRaF7X3fyCk4XDUoWJS8VVCPSQQ,3922
+zmq/tests/test_pubsub.py,sha256=3F8FAPrO7W_6LYMCIcPDh-GeE0zjlE5vOzKzPRaGHWg,1089
+zmq/tests/test_reqrep.py,sha256=BrM3UXA0fSnlxznHLNWpijlS5JA3-8O5zNY8UtY9lWk,1841
+zmq/tests/test_retry_eintr.py,sha256=tZWkvgbZXTkLFswwOguzp5KFuviJCuMO5AcyI9nXSBc,2960
+zmq/tests/test_security.py,sha256=O5I438fTMi2PIZCqZ6YcpkhKGSHSzu6FwJIWgUg433o,8141
+zmq/tests/test_socket.py,sha256=jteKO8dFJkpgf8-bKpKqHRxU_xiTuu8Hx5OM1161LLQ,21653
+zmq/tests/test_ssh.py,sha256=i2er6LvSd4fsVw-IBiagoTa18VUxfON3bOV814OdGS0,234
+zmq/tests/test_version.py,sha256=JLt9BotySAvFZx9HZebV13f0mKWMSB5nX755qKKaNGk,1334
+zmq/tests/test_win32_shim.py,sha256=Zn2uvq6SOgi-1kv5GU7-ESdeWMYYHwFLRyW5TEsJvfo,1741
+zmq/tests/test_z85.py,sha256=w3aqYwfhduBPMwRvlvWsVXVlqZk1kn7EdEYl7zOgwnQ,2232
+zmq/tests/test_zmqstream.py,sha256=7DSBW5m6RdjoZgoAr5McZ4qox8to8MNasNbFRbrgdBU,2439
+zmq/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+zmq/utils/__pycache__/__init__.cpython-38.pyc,,
+zmq/utils/__pycache__/constant_names.cpython-38.pyc,,
+zmq/utils/__pycache__/garbage.cpython-38.pyc,,
+zmq/utils/__pycache__/interop.cpython-38.pyc,,
+zmq/utils/__pycache__/jsonapi.cpython-38.pyc,,
+zmq/utils/__pycache__/monitor.cpython-38.pyc,,
+zmq/utils/__pycache__/sixcerpt.cpython-38.pyc,,
+zmq/utils/__pycache__/strtypes.cpython-38.pyc,,
+zmq/utils/__pycache__/win32.cpython-38.pyc,,
+zmq/utils/__pycache__/z85.cpython-38.pyc,,
+zmq/utils/buffers.pxd,sha256=ujS1ag4jCcRroOGh4Pg5cMIhFF_2CzFbzAEZrTYgITY,9819
+zmq/utils/compiler.json,sha256=Eqh2fc852f2ja_pxTCwqtypGttYmVv4wuCAqEDeNVdM,295
+zmq/utils/config.json,sha256=kcrTfZhBhqTRIMQkaC4F1tSdddgA7xDsQqcBpjpI57Q,265
+zmq/utils/constant_names.py,sha256=fszAcR-YXXx3aKVFgPfKLPzbS6Ie9BgedIA7uJ4Bw2E,11394
+zmq/utils/garbage.py,sha256=AFyq7cfE-cGgWr9ew4_CgVmmY0K4M1PA2DckxTxtVs4,5935
+zmq/utils/getpid_compat.h,sha256=i2vfvjRbU6gJJqCI9S1XpA6ZT__qKG9Jy9ENvXwp1hA,103
+zmq/utils/interop.py,sha256=w9Wg2AwH4_FhR990EpMZ8bDEiRl8jAydBgBtuin1Yhc,709
+zmq/utils/ipcmaxlen.h,sha256=GSGxQXMmdwwWejDQqoXxwJJIVnFxfbrBQyJ0OOkT_Cg,454
+zmq/utils/jsonapi.py,sha256=p9ctzA-WiMTu2hIJmQfT9BKZTmQqXpZZ-h4avNzEwmU,1393
+zmq/utils/monitor.py,sha256=3sqErDjK0HKUX9Pc1QImK3D2rF4gM_eO5x7jNIhb_t4,2088
+zmq/utils/mutex.h,sha256=tX_0NUDDv9s91JDDFW7UQh2wvqqaKzL9EX7dJUnQfi4,1625
+zmq/utils/pyversion_compat.h,sha256=amcxPh7Z2kcWSfl772kIot5LGegX13ybw23CIJ7X_dE,1324
+zmq/utils/sixcerpt.py,sha256=kbEMWFdQLjMR1XNIYE9CLCQbFZZjxxD5LvfiS71s8NQ,1886
+zmq/utils/strtypes.py,sha256=jx83QcmmlwXXXKCjYllJW_0K6SZKTnfCjwENzx_7yzo,1202
+zmq/utils/win32.py,sha256=A66gXW4oW96VHVIgsPFP8SC9DD4d1plumOrJHX5bCiM,5124
+zmq/utils/z85.py,sha256=qhFwvRQMmRAAxbQajhyy24iEXqr5Azg1_qw1jidvqgQ,2000
+zmq/utils/zmq_compat.h,sha256=854HTW41WdOSMOonCdBc4LOcpIf9nA3uOKSa3NygXpQ,3177
+zmq/utils/zmq_constants.h,sha256=Zu6ldGX0MSQc8te9KkVn2R9tBh_QMBLKxiGJf2XvjB0,18695
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..0796649fadbce7a476ef28c9645dbc814104a6e9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-manylinux1_x86_64
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8ae73d64e419ddd190a0fa25047aa9de7fb75a61
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyzmq-19.0.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+zmq
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..f79e4cb9aaf0b2d9e8ba78861e2071317b2384b3
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/INSTALLER
@@ -0,0 +1 @@
+conda
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3fe0f5bbee024f61745ef031ca3036f774624410
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2014-2018 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7ae20a049f3296c665c3bc701f4e6f33be833438
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ruamel_yaml_conda-0.15.80.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: false
+Tag: cp38-cp38-linux_x86_64
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/PKG-INFO b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..0f039a3b0e5ba999241930ea8cbe730eb5301135
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/PKG-INFO
@@ -0,0 +1,131 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 69.2.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+Project-URL: Documentation, https://setuptools.pypa.io/
+Project-URL: Changelog, https://setuptools.pypa.io/en/stable/history.html
+Keywords: CPAN PyPI distutils eggs package management
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=3.8
+License-File: LICENSE
+Provides-Extra: testing
+Requires-Dist: pytest>=6; extra == "testing"
+Requires-Dist: pytest-checkdocs>=2.4; extra == "testing"
+Requires-Dist: pytest-cov; platform_python_implementation != "PyPy" and extra == "testing"
+Requires-Dist: pytest-mypy>=0.9.1; platform_python_implementation != "PyPy" and extra == "testing"
+Requires-Dist: pytest-enabler>=2.2; extra == "testing"
+Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "testing"
+Requires-Dist: virtualenv>=13.0.0; extra == "testing"
+Requires-Dist: wheel; extra == "testing"
+Requires-Dist: pip>=19.1; extra == "testing"
+Requires-Dist: packaging>=23.2; extra == "testing"
+Requires-Dist: jaraco.envs>=2.2; extra == "testing"
+Requires-Dist: pytest-xdist>=3; extra == "testing"
+Requires-Dist: jaraco.path>=3.2.0; extra == "testing"
+Requires-Dist: build[virtualenv]; extra == "testing"
+Requires-Dist: filelock>=3.4.0; extra == "testing"
+Requires-Dist: ini2toml[lite]>=0.9; extra == "testing"
+Requires-Dist: tomli-w>=1.0.0; extra == "testing"
+Requires-Dist: pytest-timeout; extra == "testing"
+Requires-Dist: pytest-perf; sys_platform != "cygwin" and extra == "testing"
+Requires-Dist: jaraco.develop>=7.21; (python_version >= "3.9" and sys_platform != "cygwin") and extra == "testing"
+Requires-Dist: pytest-home>=0.5; extra == "testing"
+Requires-Dist: mypy==1.9; extra == "testing"
+Requires-Dist: tomli; extra == "testing"
+Requires-Dist: importlib_metadata; extra == "testing"
+Provides-Extra: testing-integration
+Requires-Dist: pytest; extra == "testing-integration"
+Requires-Dist: pytest-xdist; extra == "testing-integration"
+Requires-Dist: pytest-enabler; extra == "testing-integration"
+Requires-Dist: virtualenv>=13.0.0; extra == "testing-integration"
+Requires-Dist: tomli; extra == "testing-integration"
+Requires-Dist: wheel; extra == "testing-integration"
+Requires-Dist: jaraco.path>=3.2.0; extra == "testing-integration"
+Requires-Dist: jaraco.envs>=2.2; extra == "testing-integration"
+Requires-Dist: build[virtualenv]>=1.0.3; extra == "testing-integration"
+Requires-Dist: filelock>=3.4.0; extra == "testing-integration"
+Requires-Dist: packaging>=23.2; extra == "testing-integration"
+Provides-Extra: docs
+Requires-Dist: sphinx>=3.5; extra == "docs"
+Requires-Dist: sphinx<7.2.5; extra == "docs"
+Requires-Dist: jaraco.packaging>=9.3; extra == "docs"
+Requires-Dist: rst.linker>=1.9; extra == "docs"
+Requires-Dist: furo; extra == "docs"
+Requires-Dist: sphinx-lint; extra == "docs"
+Requires-Dist: jaraco.tidelift>=1.4; extra == "docs"
+Requires-Dist: pygments-github-lexers==0.0.5; extra == "docs"
+Requires-Dist: sphinx-favicon; extra == "docs"
+Requires-Dist: sphinx-inline-tabs; extra == "docs"
+Requires-Dist: sphinx-reredirects; extra == "docs"
+Requires-Dist: sphinxcontrib-towncrier; extra == "docs"
+Requires-Dist: sphinx-notfound-page<2,>=1; extra == "docs"
+Provides-Extra: ssl
+Provides-Extra: certs
+
+.. image:: https://img.shields.io/pypi/v/setuptools.svg
+   :target: https://pypi.org/project/setuptools
+
+.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+
+.. image:: https://github.com/pypa/setuptools/actions/workflows/main.yml/badge.svg
+   :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
+   :alt: tests
+
+.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
+    :target: https://github.com/astral-sh/ruff
+    :alt: Ruff
+
+.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
+    :target: https://setuptools.pypa.io
+
+.. image:: https://img.shields.io/badge/skeleton-2024-informational
+   :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
+   :target: https://codecov.io/gh/pypa/setuptools
+
+.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
+   :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
+
+.. image:: https://img.shields.io/discord/803025117553754132
+   :target: https://discord.com/channels/803025117553754132/815945031150993468
+   :alt: Discord
+
+See the `Quickstart <https://setuptools.pypa.io/en/latest/userguide/quickstart.html>`_
+and the `User's Guide <https://setuptools.pypa.io/en/latest/userguide/index.html>`_ for
+instructions on how to use Setuptools.
+
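+As a minimal sketch (an illustrative example, not upstream README text), a
+``setup.py`` for a simple package might look like::
+
+    from setuptools import setup, find_packages
+
+    setup(
+        name="example-package",    # hypothetical project name
+        version="0.1.0",
+        packages=find_packages(),  # discover packages under the current dir
+    )
+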
+Questions and comments should be directed to `GitHub Discussions
+<https://github.com/pypa/setuptools/discussions>`_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+<https://github.com/pypa/setuptools/issues>`_.
+
+
+Code of Conduct
+===============
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and fora is expected to follow the
+`PSF Code of Conduct <https://www.python.org/psf/conduct/>`_.
+
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools>`_.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/SOURCES.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cce92748770b17633a8b899b928fb519933ebf7d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/SOURCES.txt
@@ -0,0 +1,521 @@
+LICENSE
+MANIFEST.in
+NEWS.rst
+README.rst
+conftest.py
+exercises.py
+launcher.c
+pyproject.toml
+pytest.ini
+setup.cfg
+setup.py
+tox.ini
+_distutils_hack/__init__.py
+_distutils_hack/override.py
+docs/artwork.rst
+docs/build_meta.rst
+docs/conf.py
+docs/history.rst
+docs/index.rst
+docs/pkg_resources.rst
+docs/python 2 sunset.rst
+docs/roadmap.rst
+docs/setuptools.rst
+docs/deprecated/changed_keywords.rst
+docs/deprecated/commands.rst
+docs/deprecated/dependency_links.rst
+docs/deprecated/distutils-legacy.rst
+docs/deprecated/easy_install.rst
+docs/deprecated/functionalities.rst
+docs/deprecated/index.rst
+docs/deprecated/python_eggs.rst
+docs/deprecated/resource_extraction.rst
+docs/deprecated/zip_safe.rst
+docs/deprecated/distutils/_setuptools_disclaimer.rst
+docs/deprecated/distutils/apiref.rst
+docs/deprecated/distutils/builtdist.rst
+docs/deprecated/distutils/commandref.rst
+docs/deprecated/distutils/configfile.rst
+docs/deprecated/distutils/examples.rst
+docs/deprecated/distutils/extending.rst
+docs/deprecated/distutils/index.rst
+docs/deprecated/distutils/introduction.rst
+docs/deprecated/distutils/packageindex.rst
+docs/deprecated/distutils/setupscript.rst
+docs/deprecated/distutils/sourcedist.rst
+docs/deprecated/distutils/uploading.rst
+docs/development/developer-guide.rst
+docs/development/index.rst
+docs/development/releases.rst
+docs/references/keywords.rst
+docs/userguide/datafiles.rst
+docs/userguide/declarative_config.rst
+docs/userguide/dependency_management.rst
+docs/userguide/development_mode.rst
+docs/userguide/distribution.rst
+docs/userguide/entry_point.rst
+docs/userguide/ext_modules.rst
+docs/userguide/extension.rst
+docs/userguide/index.rst
+docs/userguide/miscellaneous.rst
+docs/userguide/package_discovery.rst
+docs/userguide/pyproject_config.rst
+docs/userguide/quickstart.rst
+newsfragments/.gitignore
+newsfragments/README.rst
+pkg_resources/__init__.py
+pkg_resources/api_tests.txt
+pkg_resources/_vendor/__init__.py
+pkg_resources/_vendor/typing_extensions.py
+pkg_resources/_vendor/vendored.txt
+pkg_resources/_vendor/zipp.py
+pkg_resources/_vendor/importlib_resources/__init__.py
+pkg_resources/_vendor/importlib_resources/_adapters.py
+pkg_resources/_vendor/importlib_resources/_common.py
+pkg_resources/_vendor/importlib_resources/_compat.py
+pkg_resources/_vendor/importlib_resources/_itertools.py
+pkg_resources/_vendor/importlib_resources/_legacy.py
+pkg_resources/_vendor/importlib_resources/abc.py
+pkg_resources/_vendor/importlib_resources/py.typed
+pkg_resources/_vendor/importlib_resources/readers.py
+pkg_resources/_vendor/importlib_resources/simple.py
+pkg_resources/_vendor/importlib_resources-5.10.2.dist-info/top_level.txt
+pkg_resources/_vendor/importlib_resources/tests/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/_compat.py
+pkg_resources/_vendor/importlib_resources/tests/_path.py
+pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py
+pkg_resources/_vendor/importlib_resources/tests/test_contents.py
+pkg_resources/_vendor/importlib_resources/tests/test_files.py
+pkg_resources/_vendor/importlib_resources/tests/test_open.py
+pkg_resources/_vendor/importlib_resources/tests/test_path.py
+pkg_resources/_vendor/importlib_resources/tests/test_read.py
+pkg_resources/_vendor/importlib_resources/tests/test_reader.py
+pkg_resources/_vendor/importlib_resources/tests/test_resource.py
+pkg_resources/_vendor/importlib_resources/tests/update-zips.py
+pkg_resources/_vendor/importlib_resources/tests/util.py
+pkg_resources/_vendor/importlib_resources/tests/data01/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/data02/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/data02/one/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/data02/one/resource1.txt
+pkg_resources/_vendor/importlib_resources/tests/data02/two/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/data02/two/resource2.txt
+pkg_resources/_vendor/importlib_resources/tests/zipdata01/__init__.py
+pkg_resources/_vendor/importlib_resources/tests/zipdata02/__init__.py
+pkg_resources/_vendor/jaraco/__init__.py
+pkg_resources/_vendor/jaraco/context.py
+pkg_resources/_vendor/jaraco/functools.py
+pkg_resources/_vendor/jaraco.context-4.3.0.dist-info/top_level.txt
+pkg_resources/_vendor/jaraco.functools-3.6.0.dist-info/top_level.txt
+pkg_resources/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt
+pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt
+pkg_resources/_vendor/jaraco/text/__init__.py
+pkg_resources/_vendor/more_itertools/__init__.py
+pkg_resources/_vendor/more_itertools/__init__.pyi
+pkg_resources/_vendor/more_itertools/more.py
+pkg_resources/_vendor/more_itertools/more.pyi
+pkg_resources/_vendor/more_itertools/py.typed
+pkg_resources/_vendor/more_itertools/recipes.py
+pkg_resources/_vendor/more_itertools/recipes.pyi
+pkg_resources/_vendor/packaging/__init__.py
+pkg_resources/_vendor/packaging/_elffile.py
+pkg_resources/_vendor/packaging/_manylinux.py
+pkg_resources/_vendor/packaging/_musllinux.py
+pkg_resources/_vendor/packaging/_parser.py
+pkg_resources/_vendor/packaging/_structures.py
+pkg_resources/_vendor/packaging/_tokenizer.py
+pkg_resources/_vendor/packaging/markers.py
+pkg_resources/_vendor/packaging/metadata.py
+pkg_resources/_vendor/packaging/py.typed
+pkg_resources/_vendor/packaging/requirements.py
+pkg_resources/_vendor/packaging/specifiers.py
+pkg_resources/_vendor/packaging/tags.py
+pkg_resources/_vendor/packaging/utils.py
+pkg_resources/_vendor/packaging/version.py
+pkg_resources/_vendor/platformdirs/__init__.py
+pkg_resources/_vendor/platformdirs/__main__.py
+pkg_resources/_vendor/platformdirs/android.py
+pkg_resources/_vendor/platformdirs/api.py
+pkg_resources/_vendor/platformdirs/macos.py
+pkg_resources/_vendor/platformdirs/py.typed
+pkg_resources/_vendor/platformdirs/unix.py
+pkg_resources/_vendor/platformdirs/version.py
+pkg_resources/_vendor/platformdirs/windows.py
+pkg_resources/_vendor/zipp-3.7.0.dist-info/top_level.txt
+pkg_resources/extern/__init__.py
+pkg_resources/tests/__init__.py
+pkg_resources/tests/test_find_distributions.py
+pkg_resources/tests/test_markers.py
+pkg_resources/tests/test_pkg_resources.py
+pkg_resources/tests/test_resources.py
+pkg_resources/tests/test_working_set.py
+pkg_resources/tests/data/my-test-package-source/setup.cfg
+pkg_resources/tests/data/my-test-package-source/setup.py
+pkg_resources/tests/data/my-test-package-zip/my-test-package.zip
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/PKG-INFO
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/SOURCES.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/dependency_links.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/top_level.txt
+pkg_resources/tests/data/my-test-package_unpacked-egg/my_test_package-1.0-py3.7.egg/EGG-INFO/zip-safe
+pkg_resources/tests/data/my-test-package_zipped-egg/my_test_package-1.0-py3.7.egg
+setuptools/__init__.py
+setuptools/_core_metadata.py
+setuptools/_entry_points.py
+setuptools/_imp.py
+setuptools/_importlib.py
+setuptools/_itertools.py
+setuptools/_normalization.py
+setuptools/_path.py
+setuptools/_reqs.py
+setuptools/archive_util.py
+setuptools/build_meta.py
+setuptools/cli-32.exe
+setuptools/cli-64.exe
+setuptools/cli-arm64.exe
+setuptools/cli.exe
+setuptools/dep_util.py
+setuptools/depends.py
+setuptools/discovery.py
+setuptools/dist.py
+setuptools/errors.py
+setuptools/extension.py
+setuptools/glob.py
+setuptools/gui-32.exe
+setuptools/gui-64.exe
+setuptools/gui-arm64.exe
+setuptools/gui.exe
+setuptools/installer.py
+setuptools/launch.py
+setuptools/logging.py
+setuptools/modified.py
+setuptools/monkey.py
+setuptools/msvc.py
+setuptools/namespaces.py
+setuptools/package_index.py
+setuptools/sandbox.py
+setuptools/script (dev).tmpl
+setuptools/script.tmpl
+setuptools/unicode_utils.py
+setuptools/version.py
+setuptools/warnings.py
+setuptools/wheel.py
+setuptools/windows_support.py
+setuptools.egg-info/PKG-INFO
+setuptools.egg-info/SOURCES.txt
+setuptools.egg-info/dependency_links.txt
+setuptools.egg-info/entry_points.txt
+setuptools.egg-info/requires.txt
+setuptools.egg-info/top_level.txt
+setuptools/_distutils/__init__.py
+setuptools/_distutils/_collections.py
+setuptools/_distutils/_functools.py
+setuptools/_distutils/_log.py
+setuptools/_distutils/_macos_compat.py
+setuptools/_distutils/_modified.py
+setuptools/_distutils/_msvccompiler.py
+setuptools/_distutils/archive_util.py
+setuptools/_distutils/bcppcompiler.py
+setuptools/_distutils/ccompiler.py
+setuptools/_distutils/cmd.py
+setuptools/_distutils/config.py
+setuptools/_distutils/core.py
+setuptools/_distutils/cygwinccompiler.py
+setuptools/_distutils/debug.py
+setuptools/_distutils/dep_util.py
+setuptools/_distutils/dir_util.py
+setuptools/_distutils/dist.py
+setuptools/_distutils/errors.py
+setuptools/_distutils/extension.py
+setuptools/_distutils/fancy_getopt.py
+setuptools/_distutils/file_util.py
+setuptools/_distutils/filelist.py
+setuptools/_distutils/log.py
+setuptools/_distutils/msvc9compiler.py
+setuptools/_distutils/msvccompiler.py
+setuptools/_distutils/py38compat.py
+setuptools/_distutils/py39compat.py
+setuptools/_distutils/spawn.py
+setuptools/_distutils/sysconfig.py
+setuptools/_distutils/text_file.py
+setuptools/_distutils/unixccompiler.py
+setuptools/_distutils/util.py
+setuptools/_distutils/version.py
+setuptools/_distutils/versionpredicate.py
+setuptools/_distutils/command/__init__.py
+setuptools/_distutils/command/_framework_compat.py
+setuptools/_distutils/command/bdist.py
+setuptools/_distutils/command/bdist_dumb.py
+setuptools/_distutils/command/bdist_rpm.py
+setuptools/_distutils/command/build.py
+setuptools/_distutils/command/build_clib.py
+setuptools/_distutils/command/build_ext.py
+setuptools/_distutils/command/build_py.py
+setuptools/_distutils/command/build_scripts.py
+setuptools/_distutils/command/check.py
+setuptools/_distutils/command/clean.py
+setuptools/_distutils/command/config.py
+setuptools/_distutils/command/install.py
+setuptools/_distutils/command/install_data.py
+setuptools/_distutils/command/install_egg_info.py
+setuptools/_distutils/command/install_headers.py
+setuptools/_distutils/command/install_lib.py
+setuptools/_distutils/command/install_scripts.py
+setuptools/_distutils/command/py37compat.py
+setuptools/_distutils/command/register.py
+setuptools/_distutils/command/sdist.py
+setuptools/_distutils/command/upload.py
+setuptools/_distutils/command/wininst-10.0-amd64.exe
+setuptools/_distutils/command/wininst-10.0.exe
+setuptools/_distutils/command/wininst-14.0-amd64.exe
+setuptools/_distutils/command/wininst-14.0.exe
+setuptools/_distutils/command/wininst-6.0.exe
+setuptools/_distutils/command/wininst-7.1.exe
+setuptools/_distutils/command/wininst-8.0.exe
+setuptools/_distutils/command/wininst-9.0-amd64.exe
+setuptools/_distutils/command/wininst-9.0.exe
+setuptools/_distutils/tests/__init__.py
+setuptools/_distutils/tests/py37compat.py
+setuptools/_distutils/tests/py38compat.py
+setuptools/_distutils/tests/support.py
+setuptools/_distutils/tests/test_archive_util.py
+setuptools/_distutils/tests/test_bdist.py
+setuptools/_distutils/tests/test_bdist_dumb.py
+setuptools/_distutils/tests/test_bdist_rpm.py
+setuptools/_distutils/tests/test_build.py
+setuptools/_distutils/tests/test_build_clib.py
+setuptools/_distutils/tests/test_build_ext.py
+setuptools/_distutils/tests/test_build_py.py
+setuptools/_distutils/tests/test_build_scripts.py
+setuptools/_distutils/tests/test_ccompiler.py
+setuptools/_distutils/tests/test_check.py
+setuptools/_distutils/tests/test_clean.py
+setuptools/_distutils/tests/test_cmd.py
+setuptools/_distutils/tests/test_config.py
+setuptools/_distutils/tests/test_config_cmd.py
+setuptools/_distutils/tests/test_core.py
+setuptools/_distutils/tests/test_cygwinccompiler.py
+setuptools/_distutils/tests/test_dir_util.py
+setuptools/_distutils/tests/test_dist.py
+setuptools/_distutils/tests/test_extension.py
+setuptools/_distutils/tests/test_file_util.py
+setuptools/_distutils/tests/test_filelist.py
+setuptools/_distutils/tests/test_install.py
+setuptools/_distutils/tests/test_install_data.py
+setuptools/_distutils/tests/test_install_headers.py
+setuptools/_distutils/tests/test_install_lib.py
+setuptools/_distutils/tests/test_install_scripts.py
+setuptools/_distutils/tests/test_log.py
+setuptools/_distutils/tests/test_modified.py
+setuptools/_distutils/tests/test_msvc9compiler.py
+setuptools/_distutils/tests/test_msvccompiler.py
+setuptools/_distutils/tests/test_register.py
+setuptools/_distutils/tests/test_sdist.py
+setuptools/_distutils/tests/test_spawn.py
+setuptools/_distutils/tests/test_sysconfig.py
+setuptools/_distutils/tests/test_text_file.py
+setuptools/_distutils/tests/test_unixccompiler.py
+setuptools/_distutils/tests/test_upload.py
+setuptools/_distutils/tests/test_util.py
+setuptools/_distutils/tests/test_version.py
+setuptools/_distutils/tests/test_versionpredicate.py
+setuptools/_distutils/tests/unix_compat.py
+setuptools/_vendor/__init__.py
+setuptools/_vendor/ordered_set.py
+setuptools/_vendor/typing_extensions.py
+setuptools/_vendor/vendored.txt
+setuptools/_vendor/zipp.py
+setuptools/_vendor/importlib_metadata/__init__.py
+setuptools/_vendor/importlib_metadata/_adapters.py
+setuptools/_vendor/importlib_metadata/_collections.py
+setuptools/_vendor/importlib_metadata/_compat.py
+setuptools/_vendor/importlib_metadata/_functools.py
+setuptools/_vendor/importlib_metadata/_itertools.py
+setuptools/_vendor/importlib_metadata/_meta.py
+setuptools/_vendor/importlib_metadata/_py39compat.py
+setuptools/_vendor/importlib_metadata/_text.py
+setuptools/_vendor/importlib_metadata/py.typed
+setuptools/_vendor/importlib_metadata-6.0.0.dist-info/top_level.txt
+setuptools/_vendor/importlib_resources/__init__.py
+setuptools/_vendor/importlib_resources/_adapters.py
+setuptools/_vendor/importlib_resources/_common.py
+setuptools/_vendor/importlib_resources/_compat.py
+setuptools/_vendor/importlib_resources/_itertools.py
+setuptools/_vendor/importlib_resources/_legacy.py
+setuptools/_vendor/importlib_resources/abc.py
+setuptools/_vendor/importlib_resources/py.typed
+setuptools/_vendor/importlib_resources/readers.py
+setuptools/_vendor/importlib_resources/simple.py
+setuptools/_vendor/importlib_resources-5.10.2.dist-info/top_level.txt
+setuptools/_vendor/importlib_resources/tests/__init__.py
+setuptools/_vendor/importlib_resources/tests/_compat.py
+setuptools/_vendor/importlib_resources/tests/_path.py
+setuptools/_vendor/importlib_resources/tests/test_compatibilty_files.py
+setuptools/_vendor/importlib_resources/tests/test_contents.py
+setuptools/_vendor/importlib_resources/tests/test_files.py
+setuptools/_vendor/importlib_resources/tests/test_open.py
+setuptools/_vendor/importlib_resources/tests/test_path.py
+setuptools/_vendor/importlib_resources/tests/test_read.py
+setuptools/_vendor/importlib_resources/tests/test_reader.py
+setuptools/_vendor/importlib_resources/tests/test_resource.py
+setuptools/_vendor/importlib_resources/tests/update-zips.py
+setuptools/_vendor/importlib_resources/tests/util.py
+setuptools/_vendor/importlib_resources/tests/data01/__init__.py
+setuptools/_vendor/importlib_resources/tests/data01/subdirectory/__init__.py
+setuptools/_vendor/importlib_resources/tests/data02/__init__.py
+setuptools/_vendor/importlib_resources/tests/data02/one/__init__.py
+setuptools/_vendor/importlib_resources/tests/data02/one/resource1.txt
+setuptools/_vendor/importlib_resources/tests/data02/two/__init__.py
+setuptools/_vendor/importlib_resources/tests/data02/two/resource2.txt
+setuptools/_vendor/importlib_resources/tests/zipdata01/__init__.py
+setuptools/_vendor/importlib_resources/tests/zipdata02/__init__.py
+setuptools/_vendor/jaraco/__init__.py
+setuptools/_vendor/jaraco/context.py
+setuptools/_vendor/jaraco/functools.py
+setuptools/_vendor/jaraco.context-4.3.0.dist-info/top_level.txt
+setuptools/_vendor/jaraco.functools-3.6.0.dist-info/top_level.txt
+setuptools/_vendor/jaraco.text-3.7.0.dist-info/top_level.txt
+setuptools/_vendor/jaraco/text/Lorem ipsum.txt
+setuptools/_vendor/jaraco/text/__init__.py
+setuptools/_vendor/more_itertools/__init__.py
+setuptools/_vendor/more_itertools/__init__.pyi
+setuptools/_vendor/more_itertools/more.py
+setuptools/_vendor/more_itertools/more.pyi
+setuptools/_vendor/more_itertools/py.typed
+setuptools/_vendor/more_itertools/recipes.py
+setuptools/_vendor/more_itertools/recipes.pyi
+setuptools/_vendor/more_itertools-8.8.0.dist-info/top_level.txt
+setuptools/_vendor/ordered_set-3.1.1.dist-info/top_level.txt
+setuptools/_vendor/packaging/__init__.py
+setuptools/_vendor/packaging/_elffile.py
+setuptools/_vendor/packaging/_manylinux.py
+setuptools/_vendor/packaging/_musllinux.py
+setuptools/_vendor/packaging/_parser.py
+setuptools/_vendor/packaging/_structures.py
+setuptools/_vendor/packaging/_tokenizer.py
+setuptools/_vendor/packaging/markers.py
+setuptools/_vendor/packaging/metadata.py
+setuptools/_vendor/packaging/py.typed
+setuptools/_vendor/packaging/requirements.py
+setuptools/_vendor/packaging/specifiers.py
+setuptools/_vendor/packaging/tags.py
+setuptools/_vendor/packaging/utils.py
+setuptools/_vendor/packaging/version.py
+setuptools/_vendor/tomli/__init__.py
+setuptools/_vendor/tomli/_parser.py
+setuptools/_vendor/tomli/_re.py
+setuptools/_vendor/tomli/_types.py
+setuptools/_vendor/tomli/py.typed
+setuptools/_vendor/zipp-3.7.0.dist-info/top_level.txt
+setuptools/command/__init__.py
+setuptools/command/_requirestxt.py
+setuptools/command/alias.py
+setuptools/command/bdist_egg.py
+setuptools/command/bdist_rpm.py
+setuptools/command/build.py
+setuptools/command/build_clib.py
+setuptools/command/build_ext.py
+setuptools/command/build_py.py
+setuptools/command/develop.py
+setuptools/command/dist_info.py
+setuptools/command/easy_install.py
+setuptools/command/editable_wheel.py
+setuptools/command/egg_info.py
+setuptools/command/install.py
+setuptools/command/install_egg_info.py
+setuptools/command/install_lib.py
+setuptools/command/install_scripts.py
+setuptools/command/launcher manifest.xml
+setuptools/command/register.py
+setuptools/command/rotate.py
+setuptools/command/saveopts.py
+setuptools/command/sdist.py
+setuptools/command/setopt.py
+setuptools/command/test.py
+setuptools/command/upload.py
+setuptools/command/upload_docs.py
+setuptools/compat/__init__.py
+setuptools/compat/py310.py
+setuptools/compat/py311.py
+setuptools/compat/py39.py
+setuptools/config/__init__.py
+setuptools/config/_apply_pyprojecttoml.py
+setuptools/config/expand.py
+setuptools/config/pyprojecttoml.py
+setuptools/config/setupcfg.py
+setuptools/config/_validate_pyproject/__init__.py
+setuptools/config/_validate_pyproject/error_reporting.py
+setuptools/config/_validate_pyproject/extra_validations.py
+setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py
+setuptools/config/_validate_pyproject/fastjsonschema_validations.py
+setuptools/config/_validate_pyproject/formats.py
+setuptools/extern/__init__.py
+setuptools/tests/__init__.py
+setuptools/tests/contexts.py
+setuptools/tests/environment.py
+setuptools/tests/fixtures.py
+setuptools/tests/mod_with_constant.py
+setuptools/tests/namespaces.py
+setuptools/tests/script-with-bom.py
+setuptools/tests/server.py
+setuptools/tests/test_archive_util.py
+setuptools/tests/test_bdist_deprecations.py
+setuptools/tests/test_bdist_egg.py
+setuptools/tests/test_build.py
+setuptools/tests/test_build_clib.py
+setuptools/tests/test_build_ext.py
+setuptools/tests/test_build_meta.py
+setuptools/tests/test_build_py.py
+setuptools/tests/test_config_discovery.py
+setuptools/tests/test_core_metadata.py
+setuptools/tests/test_depends.py
+setuptools/tests/test_develop.py
+setuptools/tests/test_dist.py
+setuptools/tests/test_dist_info.py
+setuptools/tests/test_distutils_adoption.py
+setuptools/tests/test_easy_install.py
+setuptools/tests/test_editable_install.py
+setuptools/tests/test_egg_info.py
+setuptools/tests/test_extern.py
+setuptools/tests/test_find_packages.py
+setuptools/tests/test_find_py_modules.py
+setuptools/tests/test_glob.py
+setuptools/tests/test_install_scripts.py
+setuptools/tests/test_integration.py
+setuptools/tests/test_logging.py
+setuptools/tests/test_manifest.py
+setuptools/tests/test_msvc14.py
+setuptools/tests/test_namespaces.py
+setuptools/tests/test_packageindex.py
+setuptools/tests/test_register.py
+setuptools/tests/test_sandbox.py
+setuptools/tests/test_sdist.py
+setuptools/tests/test_setopt.py
+setuptools/tests/test_setuptools.py
+setuptools/tests/test_test.py
+setuptools/tests/test_unicode_utils.py
+setuptools/tests/test_upload.py
+setuptools/tests/test_virtualenv.py
+setuptools/tests/test_warnings.py
+setuptools/tests/test_wheel.py
+setuptools/tests/test_windows_wrappers.py
+setuptools/tests/text.py
+setuptools/tests/textwrap.py
+setuptools/tests/config/__init__.py
+setuptools/tests/config/setupcfg_examples.txt
+setuptools/tests/config/test_apply_pyprojecttoml.py
+setuptools/tests/config/test_expand.py
+setuptools/tests/config/test_pyprojecttoml.py
+setuptools/tests/config/test_pyprojecttoml_dynamic_deps.py
+setuptools/tests/config/test_setupcfg.py
+setuptools/tests/config/downloads/__init__.py
+setuptools/tests/config/downloads/preload.py
+setuptools/tests/indexes/test_links_priority/external.html
+setuptools/tests/indexes/test_links_priority/simple/foobar/index.html
+setuptools/tests/integration/__init__.py
+setuptools/tests/integration/helpers.py
+setuptools/tests/integration/test_pip_install_sdist.py
+tools/build_launchers.py
+tools/finalize.py
+tools/generate_validation_code.py
+tools/ppc64le-patch.py
+tools/vendored.py
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/dependency_links.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/entry_points.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b429cbd8469abc2f6a5b716d8963fb41f1c479b1
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/entry_points.txt
@@ -0,0 +1,56 @@
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+build = setuptools.command.build:build
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+editable_wheel = setuptools.command.editable_wheel:editable_wheel
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+test = setuptools.command.test:test
+upload_docs = setuptools.command.upload_docs:upload_docs
+
+[distutils.setup_keywords]
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+test_loader = setuptools.dist:check_importable
+test_runner = setuptools.dist:check_importable
+test_suite = setuptools.dist:check_test_suite
+tests_require = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:invalid_unless_false
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.finalize_distribution_options]
+keywords = setuptools.dist:Distribution._finalize_setup_keywords
+parent_finalize = setuptools.dist:_Distribution.finalize_options
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/requires.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..faa8ea090a76638b31bef803136edb2f6376f763
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/requires.txt
@@ -0,0 +1,64 @@
+
+[certs]
+
+[docs]
+sphinx>=3.5
+sphinx<7.2.5
+jaraco.packaging>=9.3
+rst.linker>=1.9
+furo
+sphinx-lint
+jaraco.tidelift>=1.4
+pygments-github-lexers==0.0.5
+sphinx-favicon
+sphinx-inline-tabs
+sphinx-reredirects
+sphinxcontrib-towncrier
+sphinx-notfound-page<2,>=1
+
+[ssl]
+
+[testing]
+pytest>=6
+pytest-checkdocs>=2.4
+pytest-enabler>=2.2
+virtualenv>=13.0.0
+wheel
+pip>=19.1
+packaging>=23.2
+jaraco.envs>=2.2
+pytest-xdist>=3
+jaraco.path>=3.2.0
+build[virtualenv]
+filelock>=3.4.0
+ini2toml[lite]>=0.9
+tomli-w>=1.0.0
+pytest-timeout
+pytest-home>=0.5
+mypy==1.9
+tomli
+importlib_metadata
+
+[testing-integration]
+pytest
+pytest-xdist
+pytest-enabler
+virtualenv>=13.0.0
+tomli
+wheel
+jaraco.path>=3.2.0
+jaraco.envs>=2.2
+build[virtualenv]>=1.0.3
+filelock>=3.4.0
+packaging>=23.2
+
+[testing:platform_python_implementation != "PyPy"]
+pytest-cov
+pytest-mypy>=0.9.1
+
+[testing:python_version >= "3.9" and sys_platform != "cygwin"]
+jaraco.develop>=7.21
+
+[testing:sys_platform != "cygwin"]
+pytest-ruff>=0.2.1
+pytest-perf
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5ac1070294b478b7cc2ce677207ee08813bfa37
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/setuptools-69.2.0-py3.12.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+_distutils_hack
+pkg_resources
+setuptools
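top_level.txt simply lists the importable top-level names this distribution provides (here _distutils_hack, pkg_resources, and setuptools). A quick way to read that metadata back at runtime, sketched with importlib.metadata (standard library on Python 3.8+; this only works where setuptools' metadata is installed for the running interpreter):

# Sketch only: reading top_level.txt out of the installed metadata.
from importlib.metadata import distribution

text = distribution("setuptools").read_text("top_level.txt") or ""
print([line for line in text.splitlines() if line])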
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3fcb0b63ecea84d013a242794a9793e874879810
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchmetrics-0.9.1.dist-info/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2020-2022 PytorchLightning team
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/tmp_inputs_32_1/case00008.nii.gz b/tmp_inputs_32_1/case00008.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..f403d1d898386089715fa48661f7b4e385836e23
--- /dev/null
+++ b/tmp_inputs_32_1/case00008.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38615db6d87c93389b3ced1384c59f82f3ae2b296bf666f985d1ca073b2f8f47
+size 38287786
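The case00008.nii.gz entry, like the binary .so files earlier in this diff, is stored as a Git LFS pointer: three "key value" lines giving the spec version, the SHA-256 digest of the real content, and its size in bytes. A small, hypothetical parser for that format (parse_lfs_pointer is illustrative, not a git-lfs API):

# Hypothetical helper, not part of this repository or of git-lfs itself:
# a pointer file is just three "key value" lines, parsed directly below.
POINTER = """\
version https://git-lfs.github.com/spec/v1
oid sha256:38615db6d87c93389b3ced1384c59f82f3ae2b296bf666f985d1ca073b2f8f47
size 38287786
"""

def parse_lfs_pointer(text):
    fields = dict(line.split(" ", 1) for line in text.splitlines())
    algorithm, _, digest = fields["oid"].partition(":")
    return fields["version"], algorithm, digest, int(fields["size"])

print(parse_lfs_pointer(POINTER))

The size line (38287786 bytes, roughly 36 MiB here) is what lets a clone without LFS installed detect that it is holding a pointer rather than the real object.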