diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14635db15fdc1c4db1bc49564307a8b14fd44b86 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab46be99fc65892a9b640dba1d36785de78dce1c Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54d02d496d87893562a9e9bb3b2b1888ae728be7 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/dicom.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/dicom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98eab88e6e18d4e76469975d09e8908ea17e720c Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/dicom.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6d3e13cd8df0ff7fdf3572d6deefea2b1801aebf Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbae8bfa4db797507c185c7224e5638cf19665c2 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14737f1f993108a7a2e63ae90d3e2b5b0ad73c06 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f64631708f394c2487a98aca472c0fb8630eb28 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be6c594b05a757fa030217ad2290f8de34a302d0 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc differ diff --git 
a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2caef12527dc25f048224f947a81adf589b1c9bd Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8a3a7dcb30d2eec384d3dbb5a6d2a05b31d129f Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fe86bd2dff3f9687e2218d7d008d38a6ddbe648 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f14b5c309631eacef17103d72fe4b26aa45318 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..766582e93ee7633413c5ca105bd65c521ee842b5 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d8e7674dc8d0601ee4e0ec75ba3111a52e168ff Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54b49c069ba48fdb44f74c40a52e4942b4624fa0 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a08ef7e8a82ecc044eb98b4017f63640671a3c4 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a883539b348a01f403dc7614cd42f01492b57b8c Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc differ diff --git 
a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd454fdd26820ddc36ddf3ee23dce667ff7a8fb1 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile_v3.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile_v3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d310420ce5d67a4c3cca98327a51a2863db662c3 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile_v3.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/_swf.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_swf.py new file mode 100644 index 0000000000000000000000000000000000000000..98ca3a4b0520200fd78508feeba68102db8b3f9a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/_swf.py @@ -0,0 +1,897 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. +# This code was taken from https://github.com/almarklein/visvis/blob/master/vvmovie/images2swf.py + +# styletest: ignore E261 + +""" +Provides a function (write_swf) to store a series of numpy arrays in an +SWF movie, that can be played on a wide range of OS's. + +In desperation of wanting to share animated images, and then lacking a good +writer for animated gif or .avi, I decided to look into SWF. This format +is very well documented. + +This is a pure python module to create an SWF file that shows a series +of images. The images are stored using the DEFLATE algorithm (same as +PNG and ZIP and which is included in the standard Python distribution). 
+As this compression algorithm is much more effective than that used in +GIF images, we obtain better quality (24 bit colors + alpha channel) +while still producesing smaller files (a test showed ~75%). Although +SWF also allows for JPEG compression, doing so would probably require +a third party library for the JPEG encoding/decoding, we could +perhaps do this via Pillow or freeimage. + +sources and tools: + +- SWF on wikipedia +- Adobes "SWF File Format Specification" version 10 + (http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf) +- swftools (swfdump in specific) for debugging +- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really + good quality, while file size is reduced with factors 20-100. + A good program in my opinion. The free version has the limitation + of a watermark in the upper left corner. + +""" + +import os +import zlib +import time # noqa +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# todo: use Pillow to support reading JPEG images from SWF? + + +# Base functions and classes + + +class BitArray: + """Dynamic array of bits that automatically resizes + with factors of two. + Append bits using .append() or += + You can reverse bits using .reverse() + """ + + def __init__(self, initvalue=None): + self.data = np.zeros((16,), dtype=np.uint8) + self._len = 0 + if initvalue is not None: + self.append(initvalue) + + def __len__(self): + return self._len # self.data.shape[0] + + def __repr__(self): + return self.data[: self._len].tobytes().decode("ascii") + + def _checkSize(self): + # check length... 
grow if necessary + arraylen = self.data.shape[0] + if self._len >= arraylen: + tmp = np.zeros((arraylen * 2,), dtype=np.uint8) + tmp[: self._len] = self.data[: self._len] + self.data = tmp + + def __add__(self, value): + self.append(value) + return self + + def append(self, bits): + # check input + if isinstance(bits, BitArray): + bits = str(bits) + if isinstance(bits, int): # pragma: no cover - we dont use it + bits = str(bits) + if not isinstance(bits, str): # pragma: no cover + raise ValueError("Append bits as strings or integers!") + + # add bits + for bit in bits: + self.data[self._len] = ord(bit) + self._len += 1 + self._checkSize() + + def reverse(self): + """In-place reverse.""" + tmp = self.data[: self._len].copy() + self.data[: self._len] = tmp[::-1] + + def tobytes(self): + """Convert to bytes. If necessary, + zeros are padded to the end (right side). + """ + bits = str(self) + + # determine number of bytes + nbytes = 0 + while nbytes * 8 < len(bits): + nbytes += 1 + # pad + bits = bits.ljust(nbytes * 8, "0") + + # go from bits to bytes + bb = bytes() + for i in range(nbytes): + tmp = int(bits[i * 8 : (i + 1) * 8], 2) + bb += int2uint8(tmp) + + # done + return bb + + +def int2uint32(i): + return int(i).to_bytes(4, "little") + + +def int2uint16(i): + return int(i).to_bytes(2, "little") + + +def int2uint8(i): + return int(i).to_bytes(1, "little") + + +def int2bits(i, n=None): + """convert int to a string of bits (0's and 1's in a string), + pad to n elements. 
Convert back using int(ss,2).""" + ii = i + + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("int2bits fail: len larger than padlength.") + bb = str(bb).rjust(n, "0") + + # done + return BitArray(bb) + + +def bits2int(bb, n=8): + # Init + value = "" + + # Get value in bits + for i in range(len(bb)): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Make decimal + return int(value[:n], 2) + + +def get_type_and_len(bb): + """bb should be 6 bytes at least + Return (type, length, length_of_full_tag) + """ + # Init + value = "" + + # Get first 16 bits + for i in range(2): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Get type and length + type = int(value[:10], 2) + L = int(value[10:], 2) + L2 = L + 2 + + # Long tag header? + if L == 63: # '111111' + value = "" + for i in range(2, 6): + b = bb[i : i + 1] # becomes a single-byte bytes() + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + L = int(value, 2) + L2 = L + 6 + + # Done + return type, L, L2 + + +def signedint2bits(i, n=None): + """convert signed int to a string of bits (0's and 1's in a string), + pad to n elements. Negative numbers are stored in 2's complement bit + patterns, thus positive numbers always start with a 0. + """ + + # negative number? + ii = i + if i < 0: + # A negative number, -n, is represented as the bitwise opposite of + ii = abs(ii) - 1 # the positive-zero number n-1. 
+ + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + bb = "0" + str(bb) # always need the sign bit in front + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("signedint2bits fail: len larger than padlength.") + bb = bb.rjust(n, "0") + + # was it negative? (then opposite bits) + if i < 0: + bb = bb.replace("0", "x").replace("1", "0").replace("x", "1") + + # done + return BitArray(bb) + + +def twits2bits(arr): + """Given a few (signed) numbers, store them + as compactly as possible in the wat specifief by the swf format. + The numbers are multiplied by 20, assuming they + are twits. + Can be used to make the RECT record. + """ + + # first determine length using non justified bit strings + maxlen = 1 + for i in arr: + tmp = len(signedint2bits(i * 20)) + if tmp > maxlen: + maxlen = tmp + + # build array + bits = int2bits(maxlen, 5) + for i in arr: + bits += signedint2bits(i * 20, maxlen) + + return bits + + +def floats2bits(arr): + """Given a few (signed) numbers, convert them to bits, + stored as FB (float bit values). We always use 16.16. + Negative numbers are not (yet) possible, because I don't + know how the're implemented (ambiguity). + """ + bits = int2bits(31, 5) # 32 does not fit in 5 bits! 
+ for i in arr: + if i < 0: # pragma: no cover + raise ValueError("Dit not implement negative floats!") + i1 = int(i) + i2 = i - i1 + bits += int2bits(i1, 15) + bits += int2bits(i2 * 2**16, 16) + return bits + + +# Base Tag + + +class Tag: + def __init__(self): + self.bytes = bytes() + self.tagtype = -1 + + def process_tag(self): + """Implement this to create the tag.""" + raise NotImplementedError() + + def get_tag(self): + """Calls processTag and attaches the header.""" + self.process_tag() + + # tag to binary + bits = int2bits(self.tagtype, 10) + + # complete header uint16 thing + bits += "1" * 6 # = 63 = 0x3f + # make uint16 + bb = int2uint16(int(str(bits), 2)) + + # now add 32bit length descriptor + bb += int2uint32(len(self.bytes)) + + # done, attach and return + bb += self.bytes + return bb + + def make_rect_record(self, xmin, xmax, ymin, ymax): + """Simply uses makeCompactArray to produce + a RECT Record.""" + return twits2bits([xmin, xmax, ymin, ymax]) + + def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None): + # empty matrix? 
+ if scale_xy is None and rot_xy is None and trans_xy is None: + return "0" * 8 + + # init + bits = BitArray() + + # scale + if scale_xy: + bits += "1" + bits += floats2bits([scale_xy[0], scale_xy[1]]) + else: + bits += "0" + + # rotation + if rot_xy: + bits += "1" + bits += floats2bits([rot_xy[0], rot_xy[1]]) + else: + bits += "0" + + # translation (no flag here) + if trans_xy: + bits += twits2bits([trans_xy[0], trans_xy[1]]) + else: + bits += twits2bits([0, 0]) + + # done + return bits + + +# Control tags + + +class ControlTag(Tag): + def __init__(self): + Tag.__init__(self) + + +class FileAttributesTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 69 + + def process_tag(self): + self.bytes = "\x00".encode("ascii") * (1 + 3) + + +class ShowFrameTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 1 + + def process_tag(self): + self.bytes = bytes() + + +class SetBackgroundTag(ControlTag): + """Set the color in 0-255, or 0-1 (if floats given).""" + + def __init__(self, *rgb): + self.tagtype = 9 + if len(rgb) == 1: + rgb = rgb[0] + self.rgb = rgb + + def process_tag(self): + bb = bytes() + for i in range(3): + clr = self.rgb[i] + if isinstance(clr, float): # pragma: no cover - not used + clr = clr * 255 + bb += int2uint8(clr) + self.bytes = bb + + +class DoActionTag(Tag): + def __init__(self, action="stop"): + Tag.__init__(self) + self.tagtype = 12 + self.actions = [action] + + def append(self, action): # pragma: no cover - not used + self.actions.append(action) + + def process_tag(self): + bb = bytes() + + for action in self.actions: + action = action.lower() + if action == "stop": + bb += "\x07".encode("ascii") + elif action == "play": # pragma: no cover - not used + bb += "\x06".encode("ascii") + else: # pragma: no cover + logger.warning("unknown action: %s" % action) + + bb += int2uint8(0) + self.bytes = bb + + +# Definition tags +class DefinitionTag(Tag): + counter = 0 # to give automatically id's 
+ + def __init__(self): + Tag.__init__(self) + DefinitionTag.counter += 1 + self.id = DefinitionTag.counter # id in dictionary + + +class BitmapTag(DefinitionTag): + def __init__(self, im): + DefinitionTag.__init__(self) + self.tagtype = 36 # DefineBitsLossless2 + + # convert image (note that format is ARGB) + # even a grayscale image is stored in ARGB, nevertheless, + # the fabilous deflate compression will make it that not much + # more data is required for storing (25% or so, and less than 10% + # when storing RGB as ARGB). + + if len(im.shape) == 3: + if im.shape[2] in [3, 4]: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :, i] + if im.shape[2] == 4: + tmp[:, :, 0] = im[:, :, 3] # swap channel where alpha is + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + elif len(im.shape) == 2: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :] + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + # we changed the image to uint8 4 channels. + # now compress! 
+ self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED) + self.imshape = im.shape + + def process_tag(self): + # build tag + bb = bytes() + bb += int2uint16(self.id) # CharacterID + bb += int2uint8(5) # BitmapFormat + bb += int2uint16(self.imshape[1]) # BitmapWidth + bb += int2uint16(self.imshape[0]) # BitmapHeight + bb += self._data # ZlibBitmapData + + self.bytes = bb + + +class PlaceObjectTag(ControlTag): + def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False): + ControlTag.__init__(self) + self.tagtype = 26 + self.depth = depth + self.idToPlace = idToPlace + self.xy = xy + self.move = move + + def process_tag(self): + # retrieve stuff + depth = self.depth + xy = self.xy + id = self.idToPlace + + # build PlaceObject2 + bb = bytes() + if self.move: + bb += "\x07".encode("ascii") + else: + # (8 bit flags): 4:matrix, 2:character, 1:move + bb += "\x06".encode("ascii") + bb += int2uint16(depth) # Depth + bb += int2uint16(id) # character id + bb += self.make_matrix_record(trans_xy=xy).tobytes() # MATRIX record + self.bytes = bb + + +class ShapeTag(DefinitionTag): + def __init__(self, bitmapId, xy, wh): + DefinitionTag.__init__(self) + self.tagtype = 2 + self.bitmapId = bitmapId + self.xy = xy + self.wh = wh + + def process_tag(self): + """Returns a defineshape tag. 
with a bitmap fill""" + + bb = bytes() + bb += int2uint16(self.id) + xy, wh = self.xy, self.wh + tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1]) # ShapeBounds + bb += tmp.tobytes() + + # make SHAPEWITHSTYLE structure + + # first entry: FILLSTYLEARRAY with in it a single fill style + bb += int2uint8(1) # FillStyleCount + bb += "\x41".encode("ascii") # FillStyleType (0x41 or 0x43 unsmoothed) + bb += int2uint16(self.bitmapId) # BitmapId + # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled) + bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes() + + # # first entry: FILLSTYLEARRAY with in it a single fill style + # bb += int2uint8(1) # FillStyleCount + # bb += '\x00' # solid fill + # bb += '\x00\x00\xff' # color + + # second entry: LINESTYLEARRAY with a single line style + bb += int2uint8(0) # LineStyleCount + # bb += int2uint16(0*20) # Width + # bb += '\x00\xff\x00' # Color + + # third and fourth entry: NumFillBits and NumLineBits (4 bits each) + # I each give them four bits, so 16 styles possible. + bb += "\x44".encode("ascii") + + self.bytes = bb + + # last entries: SHAPERECORDs ... (individual shape records not aligned) + # STYLECHANGERECORD + bits = BitArray() + bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1])) + # STRAIGHTEDGERECORD 4x + bits += self.make_straight_edge_record(-self.wh[0], 0) + bits += self.make_straight_edge_record(0, -self.wh[1]) + bits += self.make_straight_edge_record(self.wh[0], 0) + bits += self.make_straight_edge_record(0, self.wh[1]) + + # ENDSHAPRECORD + bits += self.make_end_shape_record() + + self.bytes += bits.tobytes() + + # done + # self.bytes = bb + + def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None): + # first 6 flags + # Note that we use FillStyle1. If we don't flash (at least 8) does not + # recognize the frames properly when importing to library. 
+ + bits = BitArray() + bits += "0" # TypeFlag (not an edge record) + bits += "0" # StateNewStyles (only for DefineShape2 and Defineshape3) + if lineStyle: + bits += "1" # StateLineStyle + else: + bits += "0" + if fillStyle: + bits += "1" # StateFillStyle1 + else: + bits += "0" + bits += "0" # StateFillStyle0 + if moveTo: + bits += "1" # StateMoveTo + else: + bits += "0" + + # give information + # todo: nbits for fillStyle and lineStyle is hard coded. + + if moveTo: + bits += twits2bits([moveTo[0], moveTo[1]]) + if fillStyle: + bits += int2bits(fillStyle, 4) + if lineStyle: + bits += int2bits(lineStyle, 4) + + return bits + + def make_straight_edge_record(self, *dxdy): + if len(dxdy) == 1: + dxdy = dxdy[0] + + # determine required number of bits + xbits = signedint2bits(dxdy[0] * 20) + ybits = signedint2bits(dxdy[1] * 20) + nbits = max([len(xbits), len(ybits)]) + + bits = BitArray() + bits += "11" # TypeFlag and StraightFlag + bits += int2bits(nbits - 2, 4) + bits += "1" # GeneralLineFlag + bits += signedint2bits(dxdy[0] * 20, nbits) + bits += signedint2bits(dxdy[1] * 20, nbits) + + # note: I do not make use of vertical/horizontal only lines... 
+ + return bits + + def make_end_shape_record(self): + bits = BitArray() + bits += "0" # TypeFlag: no edge + bits += "0" * 5 # EndOfShape + return bits + + +def read_pixels(bb, i, tagType, L1): + """With pf's seed after the recordheader, reads the pixeldata.""" + + # Get info + charId = bb[i : i + 2] # noqa + i += 2 + format = ord(bb[i : i + 1]) + i += 1 + width = bits2int(bb[i : i + 2], 16) + i += 2 + height = bits2int(bb[i : i + 2], 16) + i += 2 + + # If we can, get pixeldata and make numpy array + if format != 5: + logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.") + else: + # Read byte data + offset = 2 + 1 + 2 + 2 # all the info bits + bb2 = bb[i : i + (L1 - offset)] + + # Decompress and make numpy array + data = zlib.decompress(bb2) + a = np.frombuffer(data, dtype=np.uint8) + + # Set shape + if tagType == 20: + # DefineBitsLossless - RGB data + try: + a.shape = height, width, 3 + except Exception: + # Byte align stuff might cause troubles + logger.warning("Cannot read image due to byte alignment") + if tagType == 36: + # DefineBitsLossless2 - ARGB data + a.shape = height, width, 4 + # Swap alpha channel to make RGBA + b = a + a = np.zeros_like(a) + a[:, :, 0] = b[:, :, 1] + a[:, :, 1] = b[:, :, 2] + a[:, :, 2] = b[:, :, 3] + a[:, :, 3] = b[:, :, 0] + + return a + + +# Last few functions + + +# These are the original public functions, we don't use them, but we +# keep it so that in principle this module can be used stand-alone. + + +def checkImages(images): # pragma: no cover + """checkImages(images) + Check numpy images and correct intensity range etc. + The same for all movie formats. 
+ """ + # Init results + images2 = [] + + for im in images: + if isinstance(im, np.ndarray): + # Check and convert dtype + if im.dtype == np.uint8: + images2.append(im) # Ok + elif im.dtype in [np.float32, np.float64]: + theMax = im.max() + if 128 < theMax < 300: + pass # assume 0:255 + else: + im = im.copy() + im[im < 0] = 0 + im[im > 1] = 1 + im *= 255 + images2.append(im.astype(np.uint8)) + else: + im = im.astype(np.uint8) + images2.append(im) + # Check size + if im.ndim == 2: + pass # ok + elif im.ndim == 3: + if im.shape[2] not in [3, 4]: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("Invalid image type: " + str(type(im))) + + # Done + return images2 + + +def build_file( + fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8 +): # pragma: no cover + """Give the given file (as bytes) a header.""" + + # compose header + bb = bytes() + bb += "F".encode("ascii") # uncompressed + bb += "WS".encode("ascii") # signature bytes + bb += int2uint8(version) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + bb += int2uint8(0) + int2uint8(fps) # FrameRate + bb += int2uint16(nframes) + fp.write(bb) + + # produce all tags + for tag in taglist: + fp.write(tag.get_tag()) + + # finish with end tag + fp.write("\x00\x00".encode("ascii")) + + # set size + sze = fp.tell() + fp.seek(4) + fp.write(int2uint32(sze)) + + +def write_swf(filename, images, duration=0.1, repeat=True): # pragma: no cover + """Write an swf-file from the specified images. If repeat is False, + the movie is finished with a stop action. Duration may also + be a list with durations for each frame (note that the duration + for each frame is always an integer amount of the minimum duration.) 
+ + Images should be a list consisting numpy arrays with values between + 0 and 255 for integer types, and between 0 and 1 for float types. + + """ + + # Check images + images2 = checkImages(images) + + # Init + taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)] + + # Check duration + if hasattr(duration, "__len__"): + if len(duration) == len(images2): + duration = [d for d in duration] + else: + raise ValueError("len(duration) doesn't match amount of images.") + else: + duration = [duration for im in images2] + + # Build delays list + minDuration = float(min(duration)) + delays = [round(d / minDuration) for d in duration] + delays = [max(1, int(d)) for d in delays] + + # Get FPS + fps = 1.0 / minDuration + + # Produce series of tags for each image + # t0 = time.time() + nframes = 0 + for im in images2: + bm = BitmapTag(im) + wh = (im.shape[1], im.shape[0]) + sh = ShapeTag(bm.id, (0, 0), wh) + po = PlaceObjectTag(1, sh.id, move=nframes > 0) + taglist.extend([bm, sh, po]) + for i in range(delays[nframes]): + taglist.append(ShowFrameTag()) + nframes += 1 + + if not repeat: + taglist.append(DoActionTag("stop")) + + # Build file + # t1 = time.time() + fp = open(filename, "wb") + try: + build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps) + except Exception: + raise + finally: + fp.close() + # t2 = time.time() + + # logger.warning("Writing SWF took %1.2f and %1.2f seconds" % (t1-t0, t2-t1) ) + + +def read_swf(filename): # pragma: no cover + """Read all images from an SWF (shockwave flash) file. Returns a list + of numpy arrays. + + Limitation: only read the PNG encoded images (not the JPG encoded ones). 
+ """ + + # Check whether it exists + if not os.path.isfile(filename): + raise IOError("File not found: " + str(filename)) + + # Init images + images = [] + + # Open file and read all + fp = open(filename, "rb") + bb = fp.read() + + try: + # Check opening tag + tmp = bb[0:3].decode("ascii", "ignore") + if tmp.upper() == "FWS": + pass # ok + elif tmp.upper() == "CWS": + # Decompress movie + bb = bb[:8] + zlib.decompress(bb[8:]) + else: + raise IOError("Not a valid SWF file: " + str(filename)) + + # Set filepointer at first tag (skipping framesize RECT and two uin16's + i = 8 + nbits = bits2int(bb[i : i + 1], 5) # skip FrameSize + nbits = 5 + nbits * 4 + Lrect = nbits / 8.0 + if Lrect % 1: + Lrect += 1 + Lrect = int(Lrect) + i += Lrect + 4 + + # Iterate over the tags + counter = 0 + while True: + counter += 1 + + # Get tag header + head = bb[i : i + 6] + if not head: + break # Done (we missed end tag) + + # Determine type and length + T, L1, L2 = get_type_and_len(head) + if not L2: + logger.warning("Invalid tag length, could not proceed") + break + # logger.warning(T, L2) + + # Read image if we can + if T in [20, 36]: + im = read_pixels(bb, i + 6, T, L1) + if im is not None: + images.append(im) + elif T in [6, 21, 35, 90]: + logger.warning("Ignoring JPEG image: cannot read JPEG.") + else: + pass # Not an image tag + + # Detect end tag + if T == 0: + break + + # Next tag! + i += L2 + + finally: + fp.close() + + # Done + return images + + +# Backward compatibility; same public names as when this was images2swf. 
+writeSwf = write_swf +readSwf = read_swf diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/example.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/example.py new file mode 100644 index 0000000000000000000000000000000000000000..b7cf8b9b3d01229c71cacf1cb86c920960aa6ec4 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/example.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Example plugin. You can use this as a template for your own plugin. +""" + +import numpy as np + +from .. import formats +from ..core import Format + + +class DummyFormat(Format): + """The dummy format is an example format that does nothing. + It will never indicate that it can read or write a file. When + explicitly asked to read, it will simply read the bytes. When + explicitly asked to write, it will raise an error. + + This documentation is shown when the user does ``help('thisformat')``. + + Parameters for reading + ---------------------- + Specify arguments in numpy doc style here. + + Parameters for saving + --------------------- + Specify arguments in numpy doc style here. + + """ + + def _can_read(self, request): + # This method is called when the format manager is searching + # for a format to read a certain image. Return True if this format + # can do it. + # + # The format manager is aware of the extensions and the modes + # that each format can handle. It will first ask all formats + # that *seem* to be able to read it whether they can. If none + # can, it will ask the remaining formats if they can: the + # extension might be missing, and this allows formats to provide + # functionality for certain extensions, while giving preference + # to other plugins. + # + # If a format says it can, it should live up to it. The format + # would ideally check the request.firstbytes and look for a + # header of some kind. 
+ # + # The request object has: + # request.filename: a representation of the source (only for reporting) + # request.firstbytes: the first 256 bytes of the file. + # request.mode[0]: read or write mode + + if request.extension in self.extensions: + return True + + def _can_write(self, request): + # This method is called when the format manager is searching + # for a format to write a certain image. It will first ask all + # formats that *seem* to be able to write it whether they can. + # If none can, it will ask the remaining formats if they can. + # + # Return True if the format can do it. + + # In most cases, this code does suffice: + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, some_option=False, length=1): + # Specify kwargs here. Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to get access to the + # data. Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + self._length = length # passed as an arg in this case for testing + self._data = None + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _get_length(self): + # Return the number of images. Can be np.inf + return self._length + + def _get_data(self, index): + # Return the data and meta data for the given index + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + # Read all bytes + if self._data is None: + self._data = self._fp.read() + # Put in a numpy array + im = np.frombuffer(self._data, "uint8") + im.shape = len(im), 1 + # Return array and dummy meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. 
+ return {} # This format does not support meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + # Specify kwargs here. Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to write the data. + # Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. + raise RuntimeError("The dummy format cannot write image data.") + + def set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The dummy format cannot write meta data.") + + +# Register. You register an *instance* of a Format class. Here specify: +format = DummyFormat( + "dummy", # short name + "An example format that does nothing.", # one line descr. + ".foobar .nonexistentext", # list of extensions + "iI", # modes, characters in iIvV +) +formats.add_format(format) diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/lytro.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/lytro.py new file mode 100644 index 0000000000000000000000000000000000000000..add38ad9384ce4123191d67ba0426bb2b6016ae3 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/lytro.py @@ -0,0 +1,714 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, imageio contributors +# imageio is distributed under the terms of the (new) BSD License. +# + +""" Read LFR files (Lytro Illum). + +Backend: internal + +Plugin to read Lytro Illum .lfr and .raw files as produced +by the Lytro Illum light field camera. 
It is actually a collection +of plugins, each supporting slightly different keyword arguments + +Parameters +---------- +meta_only : bool + Whether to only read the metadata. +include_thumbnail : bool + (only for lytro-lfr and lytro-lfp) + Whether to include an image thumbnail in the metadata. + +""" +# +# +# This code is based on work by +# David Uhlig and his lfr_reader +# (https://www.iiit.kit.edu/uhlig.php) +# Donald Dansereau and his Matlab LF Toolbox +# (http://dgd.vision/Tools/LFToolbox/) +# and Behnam Esfahbod and his Python LFP-Reader +# (https://github.com/behnam/python-lfp-reader/) + + +import os +import json +import struct +import logging + + +import numpy as np + +from ..core import Format +from ..v2 import imread + + +logger = logging.getLogger(__name__) + + +# Sensor size of Lytro Illum resp. Lytro F01 light field camera sensor +LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728) +LYTRO_F01_IMAGE_SIZE = (3280, 3280) + +# Parameter of lfr file format +HEADER_LENGTH = 12 +SIZE_LENGTH = 4 # = 16 - header_length +SHA1_LENGTH = 45 # = len("sha1-") + (160 / 4) +PADDING_LENGTH = 35 # = (4*16) - header_length - size_length - sha1_length +DATA_CHUNKS_ILLUM = 11 +DATA_CHUNKS_F01 = 3 + + +class LytroFormat(Format): + """Base class for Lytro format. + The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and + LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format + for the Illum and original F01 camera respectively. + Writing is not supported. + """ + + # Only single images are supported. + _modes = "i" + + def _can_write(self, request): + # Writing of Lytro files is not supported + return False + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. 
+ raise RuntimeError("The lytro format cannot write image data.") + + def _set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The lytro format cannot write meta data.") + + +class LytroIllumRawFormat(LytroFormat): + """This is the Lytro Illum RAW format. + The raw format is a 10bit image format as used by the Lytro Illum + light field camera. The format will read the specified raw file and will + try to load a .txt or .json file with the associated meta data. + This format does not support writing. + + + Parameters for reading + ---------------------- + meta_only : bool + Whether to only read the metadata. + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".raw",): + return True + + @staticmethod + def rearrange_bits(array): + # Do bit rearrangement for the 10-bit lytro raw format + # Normalize output to 1.0 as float64 + t0 = array[0::5] + t1 = array[1::5] + t2 = array[2::5] + t3 = array[3::5] + lsb = array[4::5] + + t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3) + t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2) + t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4) + t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6) + + image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16) + image[:, 0::4] = t0.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 1::4] = t1.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 2::4] = t2.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 3::4] = t3.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + + # Normalize data to 1.0 as 64-bit float. + # Division is by 1023 as the Lytro Illum saves 10-bit raw data. 
+ return np.divide(image, 1023.0).astype(np.float64) + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False): + self._file = self.request.get_file() + self._data = None + self._meta_only = meta_only + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. + return 1 + + def _get_data(self, index): + # Return the data and meta data for the given index + + if index not in [0, "None"]: + raise IndexError("Lytro file contains only one dataset") + + if not self._meta_only: + # Read all bytes + if self._data is None: + self._data = self._file.read() + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16) + + # Rearrange bits + img = LytroIllumRawFormat.rearrange_bits(raw) + + else: + # Return empty image + img = np.array([]) + + # Return image and meta data + return img, self._get_meta_data(index=0) + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. + + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + # Try to read meta data from meta data file corresponding + # to the raw data file, extension in [.txt, .TXT, .json, .JSON] + filename_base = os.path.splitext(self.request.get_local_filename())[0] + + meta_data = None + + for ext in [".txt", ".TXT", ".json", ".JSON"]: + if os.path.isfile(filename_base + ext): + meta_data = json.load(open(filename_base + ext)) + + if meta_data is not None: + return meta_data + + else: + logger.warning("No metadata file found for provided raw file.") + return {} + + +class LytroLfrFormat(LytroFormat): + """This is the Lytro Illum LFR format. + The lfr is a image and meta data container format as used by the + Lytro Illum light field camera. + The format will read the specified lfr file. 
+ This format does not support writing. + + Parameters for reading + ---------------------- + meta_only : bool + Whether to only read the metadata. + include_thumbnail : bool + Whether to include an image thumbnail in the metadata. + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".lfr",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False, include_thumbnail=True): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + self._meta_only = meta_only + self._include_thumbnail = include_thumbnail + + self._find_header() + self._find_chunks() + self._find_meta() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["frames"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + if not self._meta_only: + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + # Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + # Read image preview thumbnail + if self._include_thumbnail: + chunk_dict = self._content["thumbnails"][0] + if chunk_dict["imageRef"] in self._chunks: + # Read thumbnail image 
from thumbnail chunk + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + # Read binary data, read image as jpeg + thumbnail_data = self._file.read(size) + thumbnail_img = imread(thumbnail_data, format="jpeg") + + thumbnail_height = chunk_dict["height"] + thumbnail_width = chunk_dict["width"] + + # Add thumbnail to metadata + self.metadata["thumbnail"] = { + "image": thumbnail_img, + "height": thumbnail_height, + "width": thumbnail_width, + } + + except KeyError: + raise RuntimeError("The specified file is not a valid LFR file.") + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. Can be np.inf + return 1 + + def _find_header(self): + """ + Checks if file has correct header and skip it. + """ + file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01" + # Read and check header of file + header = self._file.read(HEADER_LENGTH) + if header != file_header: + raise RuntimeError("The LFR file header is invalid.") + + # Read first bytes to skip header + self._file.read(SIZE_LENGTH) + + def _find_chunks(self): + """ + Gets start position and size of data chunks in file. + """ + chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + for i in range(0, DATA_CHUNKS_ILLUM): + data_pos, size, sha1 = self._get_chunk(chunk_header) + self._chunks[sha1] = (data_pos, size) + + def _find_meta(self): + """ + Gets a data chunk that contains information over content + of other data chunks. + """ + meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + data_pos, size, sha1 = self._get_chunk(meta_header) + + # Get content + self._file.seek(data_pos, 0) + data = self._file.read(size) + self._content = json.loads(data.decode("ASCII")) + + def _get_chunk(self, header): + """ + Checks if chunk has correct header and skips it. 
+ Finds start position and length of next chunk and reads + sha1-string that identifies the following data chunk. + + Parameters + ---------- + header : bytes + Byte string that identifies start of chunk. + + Returns + ------- + data_pos : int + Start position of data chunk in file. + size : int + Size of data chunk. + sha1 : str + Sha1 value of chunk. + """ + # Read and check header of chunk + header_chunk = self._file.read(HEADER_LENGTH) + if header_chunk != header: + raise RuntimeError("The LFR chunk header is invalid.") + + data_pos = None + sha1 = None + + # Read size + size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0] + if size > 0: + # Read sha1 + sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII")) + # Skip fixed null chars + self._file.read(PADDING_LENGTH) + # Find start of data and skip data + data_pos = self._file.tell() + self._file.seek(size, 1) + # Skip extra null chars + ch = self._file.read(1) + while ch == b"\0": + ch = self._file.read(1) + self._file.seek(-1, 1) + + return data_pos, size, sha1 + + def _get_data(self, index): + # Return the data and meta data for the given index + if index not in [0, None]: + raise IndexError("Lytro lfr file contains only one dataset") + + if not self._meta_only: + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype( + np.uint16 + ) + im = LytroIllumRawFormat.rearrange_bits(raw) + else: + im = np.array([]) + + # Return array and dummy meta data + return im, self.metadata + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, + # it returns the global meta data. + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + return self.metadata + + +class LytroF01RawFormat(LytroFormat): + """This is the Lytro RAW format for the original F01 Lytro camera. + The raw format is a 12bit image format as used by the Lytro F01 + light field camera. 
The format will read the specified raw file and will + try to load a .txt or .json file with the associated meta data. + This format does not support writing. + + + Parameters for reading + ---------------------- + meta_only : bool + Whether to only read the metadata. + + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".raw",): + return True + + @staticmethod + def rearrange_bits(array): + # Do bit rearrangement for the 12-bit lytro raw format + # Normalize output to 1.0 as float64 + t0 = array[0::3] + t1 = array[1::3] + t2 = array[2::3] + + a0 = np.left_shift(t0, 4) + np.right_shift(np.bitwise_and(t1, 240), 4) + a1 = np.left_shift(np.bitwise_and(t1, 15), 8) + t2 + + image = np.zeros(LYTRO_F01_IMAGE_SIZE, dtype=np.uint16) + image[:, 0::2] = a0.reshape( + (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2) + ) + image[:, 1::2] = a1.reshape( + (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2) + ) + + # Normalize data to 1.0 as 64-bit float. + # Division is by 4095 as the Lytro F01 saves 12-bit raw data. + return np.divide(image, 4095.0).astype(np.float64) + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False): + self._file = self.request.get_file() + self._data = None + self._meta_only = meta_only + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. 
+ return 1 + + def _get_data(self, index): + # Return the data and meta data for the given index + + if index not in [0, "None"]: + raise IndexError("Lytro file contains only one dataset") + + if not self._meta_only: + # Read all bytes + if self._data is None: + self._data = self._file.read() + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16) + + # Rearrange bits + img = LytroF01RawFormat.rearrange_bits(raw) + + else: + img = np.array([]) + + # Return image and meta data + return img, self._get_meta_data(index=0) + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. + + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + # Try to read meta data from meta data file corresponding + # to the raw data file, extension in [.txt, .TXT, .json, .JSON] + filename_base = os.path.splitext(self.request.get_local_filename())[0] + + meta_data = None + + for ext in [".txt", ".TXT", ".json", ".JSON"]: + if os.path.isfile(filename_base + ext): + meta_data = json.load(open(filename_base + ext)) + + if meta_data is not None: + return meta_data + + else: + logger.warning("No metadata file found for provided raw file.") + return {} + + +class LytroLfpFormat(LytroFormat): + """This is the Lytro Illum LFP format. + The lfp is a image and meta data container format as used by the + Lytro F01 light field camera. + The format will read the specified lfp file. + This format does not support writing. + + Parameters for reading + ---------------------- + meta_only : bool + Whether to only read the metadata. + include_thumbnail : bool + Whether to include an image thumbnail in the metadata. 
+ """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".lfp",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + self._meta_only = meta_only + + self._find_header() + self._find_meta() + self._find_chunks() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["picture"]["frameArray"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + if not self._meta_only: + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + # Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + except KeyError: + raise RuntimeError("The specified file is not a valid LFP file.") + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. Can be np.inf + return 1 + + def _find_header(self): + """ + Checks if file has correct header and skip it. 
+ """ + file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01" + + # Read and check header of file + header = self._file.read(HEADER_LENGTH) + if header != file_header: + raise RuntimeError("The LFP file header is invalid.") + + # Read first bytes to skip header + self._file.read(SIZE_LENGTH) + + def _find_chunks(self): + """ + Gets start position and size of data chunks in file. + """ + chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + for i in range(0, DATA_CHUNKS_F01): + data_pos, size, sha1 = self._get_chunk(chunk_header) + self._chunks[sha1] = (data_pos, size) + + def _find_meta(self): + """ + Gets a data chunk that contains information over content + of other data chunks. + """ + meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + data_pos, size, sha1 = self._get_chunk(meta_header) + + # Get content + self._file.seek(data_pos, 0) + data = self._file.read(size) + self._content = json.loads(data.decode("ASCII")) + data = self._file.read(5) # Skip 5 + + def _get_chunk(self, header): + """ + Checks if chunk has correct header and skips it. + Finds start position and length of next chunk and reads + sha1-string that identifies the following data chunk. + + Parameters + ---------- + header : bytes + Byte string that identifies start of chunk. + + Returns + ------- + data_pos : int + Start position of data chunk in file. + size : int + Size of data chunk. + sha1 : str + Sha1 value of chunk. 
+ """ + # Read and check header of chunk + header_chunk = self._file.read(HEADER_LENGTH) + if header_chunk != header: + raise RuntimeError("The LFP chunk header is invalid.") + + data_pos = None + sha1 = None + + # Read size + size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0] + if size > 0: + # Read sha1 + sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII")) + # Skip fixed null chars + self._file.read(PADDING_LENGTH) + # Find start of data and skip data + data_pos = self._file.tell() + self._file.seek(size, 1) + # Skip extra null chars + ch = self._file.read(1) + while ch == b"\0": + ch = self._file.read(1) + self._file.seek(-1, 1) + + return data_pos, size, sha1 + + def _get_data(self, index): + # Return the data and meta data for the given index + if index not in [0, None]: + raise IndexError("Lytro lfp file contains only one dataset") + + if not self._meta_only: + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype( + np.uint16 + ) + im = LytroF01RawFormat.rearrange_bits(raw) + else: + im = np.array([]) + + # Return array and dummy meta data + return im, self.metadata + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, + # it returns the global meta data. + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + return self.metadata diff --git a/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile.py b/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile.py new file mode 100644 index 0000000000000000000000000000000000000000..190cfe2656181352e0a5fc87eeec51f46d5901f3 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/imageio/plugins/tifffile.py @@ -0,0 +1,561 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write TIFF files. 
+ +Backend: internal + +Provides support for a wide range of Tiff images using the tifffile +backend. + +Parameters for reading +---------------------- +offset : int + Optional start position of embedded file. By default this is + the current file position. +size : int + Optional size of embedded file. By default this is the number + of bytes from the 'offset' to the end of the file. +multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. +multifile_close : bool + If True (default), keep the handles of other files in multifile + series closed. This is inefficient when few files refer to + many pages. If False, the C runtime may run out of resources. + +Parameters for saving +--------------------- +bigtiff : bool + If True, the BigTIFF format is used. +byteorder : {'<', '>'} + The endianness of the data in the file. + By default this is the system's native byte order. +software : str + Name of the software used to create the image. + Saved with the first page only. + +Metadata for reading +-------------------- +planar_configuration : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. +resolution_unit : int + The resolution unit stored in the TIFF tag. Usually 1 means no/unknown unit, + 2 means dpi (inch), 3 means dpc (centimeter). +resolution : (float, float, str) + A tuple formatted as (X_resolution, Y_resolution, unit). The unit is a + string representing one of the following units:: + + NONE # No unit or unit unknown + INCH # dpi + CENTIMETER # cpi + MILLIMETER + MICROMETER + +compression : int + Value indicating the compression algorithm used, e.g. 5 is LZW, + 7 is JPEG, 8 is deflate. + If 1, data are uncompressed. 
+predictor : int + Value 2 indicates horizontal differencing was used before compression, + while 3 indicates floating point horizontal differencing. + If 1, no prediction scheme was used before compression. +orientation : {'top_left', 'bottom_right', ...} + Oriented of image array. +is_rgb : bool + True if page contains a RGB image. +is_contig : bool + True if page contains a contiguous image. +is_tiled : bool + True if page contains tiled image. +is_palette : bool + True if page contains a palette-colored image and not OME or STK. +is_reduced : bool + True if page is a reduced image of another image. +is_shaped : bool + True if page contains shape in image_description tag. +is_fluoview : bool + True if page contains FluoView MM_STAMP tag. +is_nih : bool + True if page contains NIH image header. +is_micromanager : bool + True if page contains Micro-Manager metadata. +is_ome : bool + True if page contains OME-XML in image_description tag. +is_sgi : bool + True if page contains SGI image and tile depth tags. +is_mdgel : bool + True if page contains md_file_tag tag. +is_mediacy : bool + True if page contains Media Cybernetics Id tag. +is_stk : bool + True if page contains UIC2Tag tag. +is_lsm : bool + True if page contains LSM CZ_LSM_INFO tag. +description : str + Image description +description1 : str + Additional description +is_imagej : None or str + ImageJ metadata +software : str + Software used to create the TIFF file +datetime : datetime.datetime + Creation date and time + +Metadata for writing +-------------------- +photometric : {'minisblack', 'miniswhite', 'rgb'} + The color space of the image data. + By default this setting is inferred from the data shape. +planarconfig : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. 
+resolution : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. +description : str + The subject of the image. Saved with the first page only. +compress : int + Values from 0 to 9 controlling the level of zlib (deflate) compression. + If 0, data are written uncompressed (default). +compression : str, (int, int) + Compression scheme used while writing the image. If omitted (default) the + image is not uncompressed. Compression cannot be used to write contiguous + series. Compressors may require certain data shapes, types or value ranges. + For example, JPEG compression requires grayscale or RGB(A), uint8 or 12-bit + uint16. JPEG compression is experimental. JPEG markers and TIFF tags may not + match. Only a limited set of compression schemes are implemented. 'ZLIB' is + short for ADOBE_DEFLATE. The value is written to the Compression tag. +compressionargs: + Extra arguments passed to compression codec, e.g., compression level. Refer + to the Imagecodecs implementation for supported arguments. +predictor : bool + If True, horizontal differencing is applied before compression. + Note that using an int literal 1 actually means no prediction scheme + will be used. +volume : bool + If True, volume data are stored in one tile (if applicable) using + the SGI image_depth and tile_depth tags. + Image width and depth must be multiple of 16. + Few software can read this format, e.g. MeVisLab. +writeshape : bool + If True, write the data shape to the image_description tag + if necessary and no other description is given. +extratags: sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. + count : int + Number of data values. Not used for string values. + value : sequence + 'Count' values compatible with 'dtype'. 
    writeonce : bool
        If True, the tag is written to the first page only.

Notes
-----
Global metadata is stored with the first frame in a TIFF file.
Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
was written has no effect. Also, global metadata is ignored if metadata is
provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.

If you have installed tifffile as a Python package, imageio will attempt
to use that as backend instead of the bundled backend. Doing so can
provide access to new performance improvements and bug fixes.

"""

import datetime

from ..core import Format
from ..core.request import URI_BYTES, URI_FILE

import numpy as np
import warnings


# Prefer an externally installed tifffile; fall back to the vendored
# (deprecated) copy shipped with imageio.
try:
    import tifffile as _tifffile
except ImportError:
    warnings.warn(
        "ImageIO's vendored tifffile backend is deprecated and will be"
        " removed in ImageIO v3. Install the tifffile directly:"
        " `pip install imageio[tifffile]`",
        DeprecationWarning,
    )
    from . import _tifffile


TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm")
# Keyword arguments forwarded to TiffWriter.save/write; anything else in a
# user-supplied meta dict is dropped by Writer._sanitize_meta.
WRITE_METADATA_KEYS = (
    "photometric",
    "planarconfig",
    "resolution",
    "description",
    "compress",
    "compression",
    "compressionargs",
    "predictor",
    "volume",
    "writeshape",
    "extratags",
    "datetime",
)
# TiffPage attributes copied into the dict returned by Reader._get_meta_data.
READ_METADATA_KEYS = (
    "planar_configuration",
    "is_fluoview",
    "is_nih",
    "is_contig",
    "is_micromanager",
    "is_ome",
    "is_lsm",
    "is_palette",
    "is_reduced",
    "is_rgb",
    "is_sgi",
    "is_shaped",
    "is_stk",
    "is_tiled",
    "is_mdgel",
    "resolution_unit",
    "compression",
    "predictor",
    "is_mediacy",
    "orientation",
    "description",
    "description1",
    "is_imagej",
    "software",
)


class TiffFormat(Format):
    """Provides support for a wide range of Tiff images using the tifffile
    backend.

    Images that contain multiple pages can be read using ``imageio.mimread()``
    to read the individual pages, or ``imageio.volread()`` to obtain a
    single (higher dimensional) array.

    Note that global metadata is stored with the first frame in a TIFF file.
    Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
    was written has no effect. Also, global metadata is ignored if metadata is
    provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.

    If you have installed tifffile as a Python package, imageio will attempt
    to use that as backend instead of the bundled backend. Doing so can
    provide access to new performance improvements and bug fixes.

    Parameters for reading
    ----------------------
    offset : int
        Optional start position of embedded file. By default this is
        the current file position.
    size : int
        Optional size of embedded file. By default this is the number
        of bytes from the 'offset' to the end of the file.
    multifile : bool
        If True (default), series may include pages from multiple files.
        Currently applies to OME-TIFF only.
    multifile_close : bool
        If True (default), keep the handles of other files in multifile
        series closed. This is inefficient when few files refer to
        many pages. If False, the C runtime may run out of resources.

    Parameters for saving
    ---------------------
    bigtiff : bool
        If True, the BigTIFF format is used.
    byteorder : {'<', '>'}
        The endianness of the data in the file.
        By default this is the system's native byte order.
    software : str
        Name of the software used to create the image.
        Saved with the first page only.

    Metadata for reading
    --------------------
    planar_configuration : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution_unit : (float, float) or ((int, int), (int, int))
        X and Y resolution in dots per inch as float or rational numbers.
    compression : int
        Value indicating the compression algorithm used, e.g. 5 is LZW,
        7 is JPEG, 8 is deflate.
        If 1, data are uncompressed.
    predictor : int
        Value 2 indicates horizontal differencing was used before compression,
        while 3 indicates floating point horizontal differencing.
        If 1, no prediction scheme was used before compression.
    orientation : {'top_left', 'bottom_right', ...}
        Orientation of image array.
    is_rgb : bool
        True if page contains a RGB image.
    is_contig : bool
        True if page contains a contiguous image.
    is_tiled : bool
        True if page contains tiled image.
    is_palette : bool
        True if page contains a palette-colored image and not OME or STK.
    is_reduced : bool
        True if page is a reduced image of another image.
    is_shaped : bool
        True if page contains shape in image_description tag.
    is_fluoview : bool
        True if page contains FluoView MM_STAMP tag.
    is_nih : bool
        True if page contains NIH image header.
    is_micromanager : bool
        True if page contains Micro-Manager metadata.
    is_ome : bool
        True if page contains OME-XML in image_description tag.
    is_sgi : bool
        True if page contains SGI image and tile depth tags.
    is_stk : bool
        True if page contains UIC2Tag tag.
    is_mdgel : bool
        True if page contains md_file_tag tag.
    is_mediacy : bool
        True if page contains Media Cybernetics Id tag.
    is_lsm : bool
        True if page contains LSM CZ_LSM_INFO tag.
    description : str
        Image description
    description1 : str
        Additional description
    is_imagej : None or str
        ImageJ metadata
    software : str
        Software used to create the TIFF file
    datetime : datetime.datetime
        Creation date and time

    Metadata for writing
    --------------------
    photometric : {'minisblack', 'miniswhite', 'rgb'}
        The color space of the image data.
        By default this setting is inferred from the data shape.
    planarconfig : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution : (float, float) or ((int, int), (int, int))
        X and Y resolution in dots per inch as float or rational numbers.
    description : str
        The subject of the image. Saved with the first page only.
    compress : int
        Values from 0 to 9 controlling the level of zlib (deflate) compression.
        If 0, data are written uncompressed (default).
    predictor : bool
        If True, horizontal differencing is applied before compression.
        Note that using an int literal 1 actually means no prediction scheme
        will be used.
    volume : bool
        If True, volume data are stored in one tile (if applicable) using
        the SGI image_depth and tile_depth tags.
        Image width and depth must be multiple of 16.
        Few software can read this format, e.g. MeVisLab.
    writeshape : bool
        If True, write the data shape to the image_description tag
        if necessary and no other description is given.
    extratags: sequence of tuples
        Additional tags as [(code, dtype, count, value, writeonce)].

        code : int
            The TIFF tag Id.
        dtype : str
            Data type of items in 'value' in Python struct format.
            One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
        count : int
            Number of data values. Not used for string values.
        value : sequence
            'Count' values compatible with 'dtype'.
        writeonce : bool
            If True, the tag is written to the first page only.
    """

    def _can_read(self, request):
        # Probe the file by attempting to open it; rewind afterwards so the
        # actual reader starts from the beginning.
        try:
            _tifffile.TiffFile(request.get_file(), **request.kwargs)
        except ValueError:
            # vendored backend raises value exception
            return False
        except _tifffile.TiffFileError:  # pragma: no-cover
            # current version raises custom exception
            return False
        finally:
            request.get_file().seek(0)

        return True

    def _can_write(self, request):
        # Probe writability; rewind afterwards so the actual writer starts
        # from the beginning.
        if request._uri_type in [URI_FILE, URI_BYTES]:
            pass  # special URI
        elif request.extension not in self.extensions:
            return False

        try:
            _tifffile.TiffWriter(request.get_file(), **request.kwargs)
        except ValueError:
            # vendored backend raises value exception
            return False
        except _tifffile.TiffFileError:  # pragma: no-cover
            # current version raises custom exception
            return False
        finally:
            request.get_file().seek(0)
        return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, **kwargs):
            # Allow loading from http; tifffile uses seek, so download first
            if self.request.filename.startswith(("http://", "https://")):
                self._f = f = open(self.request.get_local_filename(), "rb")
            else:
                self._f = None
                f = self.request.get_file()
            self._tf = _tifffile.TiffFile(f, **kwargs)

        def _close(self):
            # Close the TiffFile and, if we downloaded to a local file in
            # _open, that file handle too.
            self._tf.close()
            if self._f is not None:
                self._f.close()

        def _get_length(self):
            # One "image" per TIFF series.
            return len(self._tf.series)

        def _get_data(self, index):
            if index < 0 or index >= self._get_length():
                raise IndexError("Index out of range while reading from tiff file")

            im = self._tf.asarray(series=index)
            meta = self._get_meta_data(index)

            return im, meta

        def _get_meta_data(self, index):
            # Collect the READ_METADATA_KEYS attributes from the page;
            # attributes a given tifffile version lacks are skipped.
            meta = {}
            page = self._tf.pages[index or 0]
            for key in READ_METADATA_KEYS:
                try:
                    meta[key] = getattr(page, key)
                except Exception:
                    pass

            # tifffile <= 0.12.1 use datetime, newer use DateTime
            for key in ("datetime", "DateTime"):
                try:
                    meta["datetime"] = datetime.datetime.strptime(
                        page.tags[key].value, "%Y:%m:%d %H:%M:%S"
                    )
                    break
                except Exception:
                    pass

            # Tag 296 is ResolutionUnit; 282/283 are X/YResolution rationals.
            if 296 in page.tags:
                meta["resolution_unit"] = page.tags[296].value.value

            if 282 in page.tags and 283 in page.tags and 296 in page.tags:
                resolution_x = page.tags[282].value
                resolution_y = page.tags[283].value
                if resolution_x[1] == 0 or resolution_y[1] == 0:
                    warnings.warn(
                        "Ignoring resolution metadata, "
                        "because at least one direction has a 0 denominator.",
                        RuntimeWarning,
                    )
                else:
                    meta["resolution"] = (
                        resolution_x[0] / resolution_x[1],
                        resolution_y[0] / resolution_y[1],
                        page.tags[296].value.name,
                    )

            return meta

    # -- writer
    class Writer(Format.Writer):
        def _open(self, bigtiff=None, byteorder=None, software=None):
            try:
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(),
                    bigtiff=bigtiff,
                    byteorder=byteorder,
                    software=software,
                )
                self._software = None
            except TypeError:
                # In tifffile >= 0.15, the `software` arg is passed to
                # TiffWriter.save
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(), bigtiff=bigtiff, byteorder=byteorder
                )
                self._software = software

            self._meta = {}
            self._frames_written = 0

        def _close(self):
            self._tf.close()

        def _append_data(self, im, meta):
            if meta is not None:
                meta = self._sanitize_meta(meta)
            else:
                # Use global metadata for first frame
                meta = self._meta if self._frames_written == 0 else {}
            if self._software is not None and self._frames_written == 0:
                meta["software"] = self._software
            # No need to check self.request.mode; tifffile figures out whether
            # this is a single page, or all page data at once.
            try:
                # TiffWriter.save has been deprecated in version 2020.9.30
                write_meth = self._tf.write
            except AttributeError:
                write_meth = self._tf.save
            write_meth(np.asanyarray(im), contiguous=False, **meta)
            self._frames_written += 1

        @staticmethod
        def _sanitize_meta(meta):
            # Keep only keys that TiffWriter understands, translating legacy
            # values where needed.
            ret = {}
            for key, value in meta.items():
                if key in WRITE_METADATA_KEYS:
                    # Special case of previously read `predictor` int value
                    # 1(=NONE) translation to False expected by TiffWriter.save
                    if key == "predictor" and not isinstance(value, bool):
                        ret[key] = value > 1
                    elif key == "compress" and value != 0:
                        warnings.warn(
                            "The use of `compress` is deprecated. Use `compression` and `compressionargs` instead.",
                            DeprecationWarning,
                        )

                        if _tifffile.__version__ < "2022":
                            ret["compression"] = (8, value)
                        else:
                            ret["compression"] = "zlib"
                            ret["compressionargs"] = {"level": value}
                    else:
                        ret[key] = value
            return ret

        def set_meta_data(self, meta):
            # Global metadata; only effective before the first frame is
            # written (see class docstring).
            self._meta = self._sanitize_meta(meta)
b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/base_collective_group.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/cuda_stream.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/cuda_stream.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46831c74684ba1570f79b563af445c5d059fd4ff Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/cuda_stream.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_collective_group.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_collective_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7bc57472b0c61f3cc343e5f2920770deb3bcfb4 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_collective_group.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_util.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cc8b988dfd1dc0f9075d938790ba7da8490a8e5 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/gloo_util.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/nccl_util.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/nccl_util.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a97f68262cccb9535076e4790d0ce468f7e5b888 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/collective/collective_group/__pycache__/nccl_util.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b390439f5e1d85b3537f9317e61c5f7aa48d4b9 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__init__.py @@ -0,0 +1,5 @@ +from multiprocessing import TimeoutError, JoinableQueue + +from .pool import Pool + +__all__ = ["Pool", "TimeoutError", "JoinableQueue"] diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fad0af5569b6a43bcf61e34425411193d1a9586 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/pool.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee3b66f737c06265796f21b19b18b49a6a94f337 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/__pycache__/pool.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/pool.py b/minigpt2/lib/python3.10/site-packages/ray/util/multiprocessing/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..91b61e8d7d0cf74a34c4097241d9f9e9d0888c23 --- /dev/null +++ 
import collections
import copy
import gc
import itertools
import logging
import os
import queue
import sys
import threading
import time
from multiprocessing import TimeoutError
from typing import Any, Callable, Dict, Hashable, Iterable, List, Optional, Tuple

import ray
from ray._private.usage import usage_lib
from ray.util import log_once

try:
    from joblib._parallel_backends import SafeFunction
    from joblib.parallel import BatchedCalls, parallel_backend
except ImportError:
    BatchedCalls = None
    parallel_backend = None
    SafeFunction = None


logger = logging.getLogger(__name__)

RAY_ADDRESS_ENV = "RAY_ADDRESS"


def _put_in_dict_registry(
    obj: Any, registry_hashable: Dict[Hashable, ray.ObjectRef]
) -> ray.ObjectRef:
    """ray.put obj once, caching the ObjectRef keyed by the (hashable) obj."""
    if obj not in registry_hashable:
        ret = ray.put(obj)
        registry_hashable[obj] = ret
    else:
        ret = registry_hashable[obj]
    return ret


def _put_in_list_registry(
    obj: Any, registry: List[Tuple[Any, ray.ObjectRef]]
) -> ray.ObjectRef:
    """ray.put obj once, caching (obj, ObjectRef) pairs matched by identity."""
    try:
        ret = next((ref for o, ref in registry if o is obj))
    except StopIteration:
        ret = ray.put(obj)
        registry.append((obj, ret))
    return ret


def ray_put_if_needed(
    obj: Any,
    registry: Optional[List[Tuple[Any, ray.ObjectRef]]] = None,
    registry_hashable: Optional[Dict[Hashable, ray.ObjectRef]] = None,
) -> ray.ObjectRef:
    """ray.put obj in object store if it's not an ObjRef and bigger than 100 bytes,
    with support for list and dict registries"""
    if isinstance(obj, ray.ObjectRef) or sys.getsizeof(obj) < 100:
        return obj
    ret = obj
    if registry_hashable is not None:
        try:
            ret = _put_in_dict_registry(obj, registry_hashable)
        except TypeError:
            # obj is unhashable; fall back to the identity-based list registry.
            if registry is not None:
                ret = _put_in_list_registry(obj, registry)
    elif registry is not None:
        ret = _put_in_list_registry(obj, registry)
    return ret


def ray_get_if_needed(obj: Any) -> Any:
    """If obj is an ObjectRef, do ray.get, otherwise return obj"""
    if isinstance(obj, ray.ObjectRef):
        return ray.get(obj)
    return obj


if BatchedCalls is not None:

    class RayBatchedCalls(BatchedCalls):
        """Joblib's BatchedCalls with basic Ray object store management

        This functionality is provided through the put_items_in_object_store,
        which uses external registries (list and dict) containing objects
        and their ObjectRefs."""

        def put_items_in_object_store(
            self,
            registry: Optional[List[Tuple[Any, ray.ObjectRef]]] = None,
            registry_hashable: Optional[Dict[Hashable, ray.ObjectRef]] = None,
        ):
            """Puts all applicable (kw)args in self.items in object store

            Takes two registries - list for unhashable objects and dict
            for hashable objects. The registries are a part of a Pool object.
            The method iterates through all entries in items list (usually,
            there will be only one, but the number depends on joblib Parallel
            settings) and puts all of the args and kwargs into the object
            store, updating the registries.

            If an arg or kwarg is already in a registry, it will not be
            put again, and instead, the cached object ref will be used."""
            new_items = []
            for func, args, kwargs in self.items:
                args = [
                    ray_put_if_needed(arg, registry, registry_hashable) for arg in args
                ]
                kwargs = {
                    k: ray_put_if_needed(v, registry, registry_hashable)
                    for k, v in kwargs.items()
                }
                new_items.append((func, args, kwargs))
            self.items = new_items

        def __call__(self):
            # Exactly the same as in BatchedCalls, with the
            # difference being that it gets args and kwargs from
            # object store (which have been put in there by
            # put_items_in_object_store)

            # Set the default nested backend to self._backend but do
            # not set the change the default number of processes to -1
            with parallel_backend(self._backend, n_jobs=self._n_jobs):
                return [
                    func(
                        *[ray_get_if_needed(arg) for arg in args],
                        **{k: ray_get_if_needed(v) for k, v in kwargs.items()},
                    )
                    for func, args, kwargs in self.items
                ]

        def __reduce__(self):
            # Exactly the same as in BatchedCalls, with the
            # difference being that it returns RayBatchedCalls
            # instead
            if self._reducer_callback is not None:
                self._reducer_callback()
            # no need pickle the callback.
            return (
                RayBatchedCalls,
                (self.items, (self._backend, self._n_jobs), None, self._pickle_cache),
            )

else:
    RayBatchedCalls = None


# Helper function to divide a by b and round the result up.
def div_round_up(a, b):
    return -(-a // b)


class PoolTaskError(Exception):
    """Wraps an exception raised inside a pool task so it can be re-raised
    in the caller's process (see AsyncResult.get)."""

    def __init__(self, underlying):
        self.underlying = underlying


class ResultThread(threading.Thread):
    """Thread that collects results from distributed actors.

    It winds down when either:
    - A pre-specified number of objects has been processed
    - When the END_SENTINEL (submitted through self.add_object_ref())
      has been received and all objects received before that have been
      processed.

    Initialize the thread with total_object_refs = float('inf') to wait for the
    END_SENTINEL.

    Args:
        object_refs (List[RayActorObjectRefs]): ObjectRefs to Ray Actor calls.
            Thread tracks whether they are ready. More ObjectRefs may be added
            with add_object_ref (or _add_object_ref internally) until the object
            count reaches total_object_refs.
        single_result: Should be True if the thread is managing function
            with a single result (like apply_async). False if the thread is managing
            a function with a List of results.
        callback: called only once at the end of the thread
            if no results were errors. If single_result=True, and result is
            not an error, callback is invoked with the result as the only
            argument. If single_result=False, callback is invoked with
            a list of all the results as the only argument.
        error_callback: called only once on the first result
            that errors. Should take an Exception as the only argument.
            If no result errors, this callback is not called.
        total_object_refs: Number of ObjectRefs that this thread
            expects to be ready. May be more than len(object_refs) since
            more ObjectRefs can be submitted after the thread starts.
            If None, defaults to len(object_refs). If float("inf"), thread runs
            until END_SENTINEL (submitted through self.add_object_ref())
            has been received and all objects received before that have
            been processed.
    """

    END_SENTINEL = None

    def __init__(
        self,
        object_refs: list,
        single_result: bool = False,
        callback: callable = None,
        error_callback: callable = None,
        total_object_refs: Optional[int] = None,
    ):
        threading.Thread.__init__(self, daemon=True)
        self._got_error = False
        self._object_refs = []
        self._num_ready = 0
        self._results = []
        self._ready_index_queue = queue.Queue()
        self._single_result = single_result
        self._callback = callback
        self._error_callback = error_callback
        self._total_object_refs = total_object_refs or len(object_refs)
        self._indices = {}
        # Thread-safe queue used to add ObjectRefs to fetch after creating
        # this thread (used to lazily submit for imap and imap_unordered).
        self._new_object_refs = queue.Queue()
        for object_ref in object_refs:
            self._add_object_ref(object_ref)

    def _add_object_ref(self, object_ref):
        self._indices[object_ref] = len(self._object_refs)
        self._object_refs.append(object_ref)
        self._results.append(None)

    def add_object_ref(self, object_ref):
        self._new_object_refs.put(object_ref)

    def run(self):
        unready = copy.copy(self._object_refs)
        aggregated_batch_results = []

        # Run for a specific number of objects if self._total_object_refs is finite.
        # Otherwise, process all objects received prior to the stop signal, given by
        # self.add_object(END_SENTINEL).
        while self._num_ready < self._total_object_refs:
            # Get as many new IDs from the queue as possible without blocking,
            # unless we have no IDs to wait on, in which case we block.
            while True:
                try:
                    block = len(unready) == 0
                    new_object_ref = self._new_object_refs.get(block=block)
                    if new_object_ref is self.END_SENTINEL:
                        # Receiving the END_SENTINEL object is the signal to stop.
                        # Store the total number of objects.
                        self._total_object_refs = len(self._object_refs)
                    else:
                        self._add_object_ref(new_object_ref)
                        unready.append(new_object_ref)
                except queue.Empty:
                    # queue.Empty means no result was retrieved if block=False.
                    break

            [ready_id], unready = ray.wait(unready, num_returns=1)
            try:
                batch = ray.get(ready_id)
            except ray.exceptions.RayError as e:
                batch = [e]

            # The exception callback is called only once on the first result
            # that errors. If no result errors, it is never called.
            if not self._got_error:
                for result in batch:
                    if isinstance(result, Exception):
                        self._got_error = True
                        if self._error_callback is not None:
                            self._error_callback(result)
                        break
                    else:
                        aggregated_batch_results.append(result)

            self._num_ready += 1
            self._results[self._indices[ready_id]] = batch
            self._ready_index_queue.put(self._indices[ready_id])

        # The regular callback is called only once on the entire List of
        # results as long as none of the results were errors. If any results
        # were errors, the regular callback is never called; instead, the
        # exception callback is called on the first erroring result.
        #
        # This callback is called outside the while loop to ensure that it's
        # called on the entire list of results– not just a single batch.
        if not self._got_error and self._callback is not None:
            if not self._single_result:
                self._callback(aggregated_batch_results)
            else:
                # On a thread handling a function with a single result
                # (e.g. apply_async), we call the callback on just that result
                # instead of on a list encapsulating that result
                self._callback(aggregated_batch_results[0])

    def got_error(self):
        # Should only be called after the thread finishes.
        return self._got_error

    def result(self, index):
        # Should only be called on results that are ready.
        return self._results[index]

    def results(self):
        # Should only be called after the thread finishes.
        return self._results

    def next_ready_index(self, timeout=None):
        try:
            return self._ready_index_queue.get(timeout=timeout)
        except queue.Empty:
            # queue.Queue signals a timeout by raising queue.Empty.
            raise TimeoutError


class AsyncResult:
    """An asynchronous interface to task results.

    This should not be constructed directly.
    """

    def __init__(
        self, chunk_object_refs, callback=None, error_callback=None, single_result=False
    ):
        self._single_result = single_result
        self._result_thread = ResultThread(
            chunk_object_refs, single_result, callback, error_callback
        )
        self._result_thread.start()

    def wait(self, timeout=None):
        """
        Returns once the result is ready or the timeout expires (does not
        raise TimeoutError).

        Args:
            timeout: timeout in seconds (forwarded to threading.Thread.join).
        """

        self._result_thread.join(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if self._result_thread.is_alive():
            raise TimeoutError

        results = []
        for batch in self._result_thread.results():
            for result in batch:
                if isinstance(result, PoolTaskError):
                    raise result.underlying
                elif isinstance(result, Exception):
                    raise result
            results.extend(batch)

        if self._single_result:
            return results[0]

        return results

    def ready(self):
        """
        Returns true if the result is ready, else false if the tasks are still
        running.
        """

        return not self._result_thread.is_alive()

    def successful(self):
        """
        Returns true if none of the submitted tasks errored, else false. Should
        only be called once the result is ready (can be checked using `ready`).
        """

        if not self.ready():
            raise ValueError(f"{self!r} not ready")
        return not self._result_thread.got_error()


class IMapIterator:
    """Base class for OrderedIMapIterator and UnorderedIMapIterator."""

    def __init__(self, pool, func, iterable, chunksize=None):
        self._pool = pool
        self._func = func
        self._next_chunk_index = 0
        self._finished_iterating = False
        # List of bools indicating if the given chunk is ready or not for all
        # submitted chunks. Ordering mirrors that in the ResultThread.
        self._submitted_chunks = []
        self._ready_objects = collections.deque()
        self._iterator = iter(iterable)
        if isinstance(iterable, collections.abc.Iterator):
            # Got iterator (which has no len() function).
            # Make default chunksize 1 instead of using _calculate_chunksize().
            # Indicate unknown queue length, requiring explicit stopping.
            self._chunksize = chunksize or 1
            result_list_size = float("inf")
        else:
            self._chunksize = chunksize or pool._calculate_chunksize(iterable)
            # BUGFIX: use the resolved self._chunksize here. The raw
            # `chunksize` argument may be None (its default), which would
            # make div_round_up raise a TypeError.
            result_list_size = div_round_up(len(iterable), self._chunksize)

        self._result_thread = ResultThread([], total_object_refs=result_list_size)
        self._result_thread.start()

        for _ in range(len(self._pool._actor_pool)):
            self._submit_next_chunk()

    def _submit_next_chunk(self):
        # The full iterable has already been submitted, so no-op.
        if self._finished_iterating:
            return

        actor_index = len(self._submitted_chunks) % len(self._pool._actor_pool)
        chunk_iterator = itertools.islice(self._iterator, self._chunksize)

        # Check whether we have run out of samples.
        # This consumes the original iterator, so we convert to a list and back
        chunk_list = list(chunk_iterator)
        if len(chunk_list) < self._chunksize:
            # Reached end of self._iterator
            self._finished_iterating = True
            if len(chunk_list) == 0:
                # Nothing to do, return.
                return
        chunk_iterator = iter(chunk_list)

        new_chunk_id = self._pool._submit_chunk(
            self._func, chunk_iterator, self._chunksize, actor_index
        )
        self._submitted_chunks.append(False)
        # Wait for the result
        self._result_thread.add_object_ref(new_chunk_id)
        # If we submitted the final chunk, notify the result thread
        if self._finished_iterating:
            self._result_thread.add_object_ref(ResultThread.END_SENTINEL)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        # Should be implemented by subclasses.
        raise NotImplementedError


class OrderedIMapIterator(IMapIterator):
    """Iterator to the results of tasks submitted using `imap`.

    The results are returned in the same order that they were submitted, even
    if they don't finish in that order. Only one batch of tasks per actor
    process is submitted at a time - the rest are submitted as results come in.

    Should not be constructed directly.
    """

    def next(self, timeout=None):
        if len(self._ready_objects) == 0:
            if self._finished_iterating and (
                self._next_chunk_index == len(self._submitted_chunks)
            ):
                # Finish when all chunks have been dispatched and processed
                # Notify the calling process that the work is done.
                raise StopIteration

            # This loop will break when the next index in order is ready or
            # self._result_thread.next_ready_index() raises a timeout.
            index = -1
            while index != self._next_chunk_index:
                start = time.time()
                index = self._result_thread.next_ready_index(timeout=timeout)
                self._submit_next_chunk()
                self._submitted_chunks[index] = True
                if timeout is not None:
                    timeout = max(0, timeout - (time.time() - start))

            while (
                self._next_chunk_index < len(self._submitted_chunks)
                and self._submitted_chunks[self._next_chunk_index]
            ):
                for result in self._result_thread.result(self._next_chunk_index):
                    self._ready_objects.append(result)
                self._next_chunk_index += 1

        return self._ready_objects.popleft()


class UnorderedIMapIterator(IMapIterator):
    """Iterator to the results of tasks submitted using `imap`.

    The results are returned in the order that they finish. Only one batch of
    tasks per actor process is submitted at a time - the rest are submitted as
    results come in.

    Should not be constructed directly.
    """

    def next(self, timeout=None):
        if len(self._ready_objects) == 0:
            if self._finished_iterating and (
                self._next_chunk_index == len(self._submitted_chunks)
            ):
                # Finish when all chunks have been dispatched and processed
                # Notify the calling process that the work is done.
                raise StopIteration

            index = self._result_thread.next_ready_index(timeout=timeout)
            self._submit_next_chunk()

            for result in self._result_thread.result(index):
                self._ready_objects.append(result)
            self._next_chunk_index += 1

        return self._ready_objects.popleft()


@ray.remote(num_cpus=0)
class PoolActor:
    """Actor used to process tasks submitted to a Pool."""

    def __init__(self, initializer=None, initargs=None):
        if initializer:
            initargs = initargs or ()
            initializer(*initargs)

    def ping(self):
        # Used to wait for this actor to be initialized.
        pass

    def run_batch(self, func, batch):
        # Run func over a batch of (args, kwargs) tuples, converting raised
        # exceptions to PoolTaskError so a single failure doesn't lose the
        # rest of the batch.
        results = []
        for args, kwargs in batch:
            args = args or ()
            kwargs = kwargs or {}
            try:
                results.append(func(*args, **kwargs))
            except Exception as e:
                results.append(PoolTaskError(e))
        return results


# https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.pool
class Pool:
    """A pool of actor processes that is used to process tasks in parallel.

    Args:
        processes: number of actor processes to start in the pool. Defaults to
            the number of cores in the Ray cluster if one is already running,
            otherwise the number of cores on this machine.
        initializer: function to be run in each actor when it starts up.
        initargs: iterable of arguments to the initializer function.
        maxtasksperchild: maximum number of tasks to run in each actor process.
            After a process has executed this many tasks, it will be killed and
            replaced with a new one.
        ray_address: address of the Ray cluster to run on. If None, a new local
            Ray cluster will be started on this machine. Otherwise, this will
            be passed to `ray.init()` to connect to a running cluster. This may
            also be specified using the `RAY_ADDRESS` environment variable.
        ray_remote_args: arguments used to configure the Ray Actors making up
            the pool.
    """

    def __init__(
        self,
        processes: Optional[int] = None,
        initializer: Optional[Callable] = None,
        initargs: Optional[Iterable] = None,
        maxtasksperchild: Optional[int] = None,
        context: Any = None,
        ray_address: Optional[str] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        usage_lib.record_library_usage("util.multiprocessing.Pool")

        self._closed = False
        self._initializer = initializer
        self._initargs = initargs
        self._maxtasksperchild = maxtasksperchild or -1
        self._actor_deletion_ids = []
        self._registry: List[Tuple[Any, ray.ObjectRef]] = []
        self._registry_hashable: Dict[Hashable, ray.ObjectRef] = {}
        self._current_index = 0
        self._ray_remote_args = ray_remote_args or {}
        self._pool_actor = None

        if context and log_once("context_argument_warning"):
            logger.warning(
                "The 'context' argument is not supported using "
                "ray. Please refer to the documentation for how "
                "to control ray initialization."
            )

        processes = self._init_ray(processes, ray_address)
        self._start_actor_pool(processes)

    def _init_ray(self, processes=None, ray_address=None):
        # Initialize ray. If ray is already initialized, we do nothing.
        # Else, the priority is:
        # ray_address argument > RAY_ADDRESS > start new local cluster.
        if not ray.is_initialized():
            # Cluster mode.
            if ray_address is None and (
                RAY_ADDRESS_ENV in os.environ
                or ray._private.utils.read_ray_address() is not None
            ):
                ray.init()
            elif ray_address is not None:
                init_kwargs = {}
                if ray_address == "local":
                    init_kwargs["num_cpus"] = processes
                ray.init(address=ray_address, **init_kwargs)
            # Local mode.
            else:
                ray.init(num_cpus=processes)

        ray_cpus = int(ray._private.state.cluster_resources()["CPU"])
        if processes is None:
            processes = ray_cpus
        if processes <= 0:
            raise ValueError("Processes in the pool must be >0.")
        if ray_cpus < processes:
            raise ValueError(
                "Tried to start a pool with {} processes on an "
                "existing ray cluster, but there are only {} "
                "CPUs in the ray cluster.".format(processes, ray_cpus)
            )

        return processes

    def _start_actor_pool(self, processes):
        self._pool_actor = None
        self._actor_pool = [self._new_actor_entry() for _ in range(processes)]
        ray.get([actor.ping.remote() for actor, _ in self._actor_pool])

    def _wait_for_stopping_actors(self, timeout=None):
        if len(self._actor_deletion_ids) == 0:
            return
        if timeout is not None:
            timeout = float(timeout)

        _, deleting = ray.wait(
            self._actor_deletion_ids,
            num_returns=len(self._actor_deletion_ids),
            timeout=timeout,
        )
        self._actor_deletion_ids = deleting

    def _stop_actor(self, actor):
        # Check and clean up any outstanding IDs corresponding to deletions.
        self._wait_for_stopping_actors(timeout=0.0)
        # The deletion task will block until the actor has finished executing
        # all pending tasks.
        self._actor_deletion_ids.append(actor.__ray_terminate__.remote())

    def _new_actor_entry(self):
        # NOTE(edoakes): The initializer function can't currently be used to
        # modify the global namespace (e.g., import packages or set globals)
        # due to a limitation in cloudpickle.
        # Cache the PoolActor with options
        if not self._pool_actor:
            self._pool_actor = PoolActor.options(**self._ray_remote_args)
        return (self._pool_actor.remote(self._initializer, self._initargs), 0)

    def _next_actor_index(self):
        if self._current_index == len(self._actor_pool) - 1:
            self._current_index = 0
        else:
            self._current_index += 1
        return self._current_index

    # Batch should be a list of tuples: (args, kwargs).
+ def _run_batch(self, actor_index, func, batch): + actor, count = self._actor_pool[actor_index] + object_ref = actor.run_batch.remote(func, batch) + count += 1 + assert self._maxtasksperchild == -1 or count <= self._maxtasksperchild + if count == self._maxtasksperchild: + self._stop_actor(actor) + actor, count = self._new_actor_entry() + self._actor_pool[actor_index] = (actor, count) + return object_ref + + def apply( + self, + func: Callable, + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + ): + """Run the given function on a random actor process and return the + result synchronously. + + Args: + func: function to run. + args: optional arguments to the function. + kwargs: optional keyword arguments to the function. + + Returns: + The result. + """ + + return self.apply_async(func, args, kwargs).get() + + def apply_async( + self, + func: Callable, + args: Optional[Tuple] = None, + kwargs: Optional[Dict] = None, + callback: Callable[[Any], None] = None, + error_callback: Callable[[Exception], None] = None, + ): + """Run the given function on a random actor process and return an + asynchronous interface to the result. + + Args: + func: function to run. + args: optional arguments to the function. + kwargs: optional keyword arguments to the function. + callback: callback to be executed on the result once it is finished + only if it succeeds. + error_callback: callback to be executed the result once it is + finished only if the task errors. The exception raised by the + task will be passed as the only argument to the callback. + + Returns: + AsyncResult containing the result. 
+ """ + + self._check_running() + func = self._convert_to_ray_batched_calls_if_needed(func) + object_ref = self._run_batch(self._next_actor_index(), func, [(args, kwargs)]) + return AsyncResult([object_ref], callback, error_callback, single_result=True) + + def _convert_to_ray_batched_calls_if_needed(self, func: Callable) -> Callable: + """Convert joblib's BatchedCalls to RayBatchedCalls for ObjectRef caching. + + This converts joblib's BatchedCalls callable, which is a collection of + functions with their args and kwargs to be ran sequentially in an + Actor, to a RayBatchedCalls callable, which provides identical + functionality in addition to a method which ensures that common + args and kwargs are put into the object store just once, saving time + and memory. That method is then ran. + + If func is not a BatchedCalls instance, it is returned without changes. + + The ObjectRefs are cached inside two registries (_registry and + _registry_hashable), which are common for the entire Pool and are + cleaned on close.""" + if RayBatchedCalls is None: + return func + orginal_func = func + # SafeFunction is a Python 2 leftover and can be + # safely removed. 
+ if isinstance(func, SafeFunction): + func = func.func + if isinstance(func, BatchedCalls): + func = RayBatchedCalls( + func.items, + (func._backend, func._n_jobs), + func._reducer_callback, + func._pickle_cache, + ) + # go through all the items and replace args and kwargs with + # ObjectRefs, caching them in registries + func.put_items_in_object_store(self._registry, self._registry_hashable) + else: + func = orginal_func + return func + + def _calculate_chunksize(self, iterable): + chunksize, extra = divmod(len(iterable), len(self._actor_pool) * 4) + if extra: + chunksize += 1 + return chunksize + + def _submit_chunk(self, func, iterator, chunksize, actor_index, unpack_args=False): + chunk = [] + while len(chunk) < chunksize: + try: + args = next(iterator) + if not unpack_args: + args = (args,) + chunk.append((args, {})) + except StopIteration: + break + + # Nothing to submit. The caller should prevent this. + assert len(chunk) > 0 + + return self._run_batch(actor_index, func, chunk) + + def _chunk_and_run(self, func, iterable, chunksize=None, unpack_args=False): + if not hasattr(iterable, "__len__"): + iterable = list(iterable) + + if chunksize is None: + chunksize = self._calculate_chunksize(iterable) + + iterator = iter(iterable) + chunk_object_refs = [] + while len(chunk_object_refs) * chunksize < len(iterable): + actor_index = len(chunk_object_refs) % len(self._actor_pool) + chunk_object_refs.append( + self._submit_chunk( + func, iterator, chunksize, actor_index, unpack_args=unpack_args + ) + ) + + return chunk_object_refs + + def _map_async( + self, + func, + iterable, + chunksize=None, + unpack_args=False, + callback=None, + error_callback=None, + ): + self._check_running() + object_refs = self._chunk_and_run( + func, iterable, chunksize=chunksize, unpack_args=unpack_args + ) + return AsyncResult(object_refs, callback, error_callback) + + def map(self, func: Callable, iterable: Iterable, chunksize: Optional[int] = None): + """Run the given function on each 
element in the iterable round-robin + on the actor processes and return the results synchronously. + + Args: + func: function to run. + iterable: iterable of objects to be passed as the sole argument to + func. + chunksize: number of tasks to submit as a batch to each actor + process. If unspecified, a suitable chunksize will be chosen. + + Returns: + A list of results. + """ + + return self._map_async( + func, iterable, chunksize=chunksize, unpack_args=False + ).get() + + def map_async( + self, + func: Callable, + iterable: Iterable, + chunksize: Optional[int] = None, + callback: Callable[[List], None] = None, + error_callback: Callable[[Exception], None] = None, + ): + """Run the given function on each element in the iterable round-robin + on the actor processes and return an asynchronous interface to the + results. + + Args: + func: function to run. + iterable: iterable of objects to be passed as the only argument to + func. + chunksize: number of tasks to submit as a batch to each actor + process. If unspecified, a suitable chunksize will be chosen. + callback: Will only be called if none of the results were errors, + and will only be called once after all results are finished. + A Python List of all the finished results will be passed as the + only argument to the callback. + error_callback: callback executed on the first errored result. + The Exception raised by the task will be passed as the only + argument to the callback. + + Returns: + AsyncResult + """ + return self._map_async( + func, + iterable, + chunksize=chunksize, + unpack_args=False, + callback=callback, + error_callback=error_callback, + ) + + def starmap(self, func, iterable, chunksize=None): + """Same as `map`, but unpacks each element of the iterable as the + arguments to func like: [func(*args) for args in iterable]. 
+ """ + + return self._map_async( + func, iterable, chunksize=chunksize, unpack_args=True + ).get() + + def starmap_async( + self, + func: Callable, + iterable: Iterable, + callback: Callable[[List], None] = None, + error_callback: Callable[[Exception], None] = None, + ): + """Same as `map_async`, but unpacks each element of the iterable as the + arguments to func like: [func(*args) for args in iterable]. + """ + + return self._map_async( + func, + iterable, + unpack_args=True, + callback=callback, + error_callback=error_callback, + ) + + def imap(self, func: Callable, iterable: Iterable, chunksize: Optional[int] = 1): + """Same as `map`, but only submits one batch of tasks to each actor + process at a time. + + This can be useful if the iterable of arguments is very large or each + task's arguments consumes a large amount of resources. + + The results are returned in the order corresponding to their arguments + in the iterable. + + Returns: + OrderedIMapIterator + """ + + self._check_running() + return OrderedIMapIterator(self, func, iterable, chunksize=chunksize) + + def imap_unordered( + self, func: Callable, iterable: Iterable, chunksize: Optional[int] = 1 + ): + """Same as `map`, but only submits one batch of tasks to each actor + process at a time. + + This can be useful if the iterable of arguments is very large or each + task's arguments consumes a large amount of resources. + + The results are returned in the order that they finish. + + Returns: + UnorderedIMapIterator + """ + + self._check_running() + return UnorderedIMapIterator(self, func, iterable, chunksize=chunksize) + + def _check_running(self): + if self._closed: + raise ValueError("Pool not running") + + def __enter__(self): + self._check_running() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + + def close(self): + """Close the pool. + + Prevents any more tasks from being submitted on the pool but allows + outstanding work to finish. 
+ """ + + self._registry.clear() + self._registry_hashable.clear() + for actor, _ in self._actor_pool: + self._stop_actor(actor) + self._closed = True + gc.collect() + + def terminate(self): + """Close the pool. + + Prevents any more tasks from being submitted on the pool and stops + outstanding work. + """ + + if not self._closed: + self.close() + for actor, _ in self._actor_pool: + ray.kill(actor) + + def join(self): + """Wait for the actors in a closed pool to exit. + + If the pool was closed using `close`, this will return once all + outstanding work is completed. + + If the pool was closed using `terminate`, this will return quickly. + """ + + if not self._closed: + raise ValueError("Pool is still running") + self._wait_for_stopping_actors() diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0488c513cf4710cfca1774a132ce4e496289ed4f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _amp_foreach_non_finite_check_and_unscale_ { + using schema = void (at::TensorList, at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_amp_foreach_non_finite_check_and_unscale_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()") + static void call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale); +}; + +struct TORCH_API _amp_foreach_non_finite_check_and_unscale_out { + using schema = void (at::TensorList, at::Tensor &, const at::Tensor &, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_amp_foreach_non_finite_check_and_unscale") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) 
found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out); +}; + +struct TORCH_API _amp_foreach_non_finite_check_and_unscale { + using schema = ::std::tuple<::std::vector,at::Tensor> (at::TensorList, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_amp_foreach_non_finite_check_and_unscale") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)") + static ::std::tuple<::std::vector,at::Tensor> call(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale); + static ::std::tuple<::std::vector,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..18f52404c537743e032003f1bcc4cb6b2b9f84ee --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have 
defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _cast_Half(const at::Tensor & self, bool non_blocking=false); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Int_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Int_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9efc821dba9a76be10add00499ad55bb92e4de4f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Int_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _cast_Int(const at::Tensor & self, bool non_blocking=false); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h new file mode 100644 index 0000000000000000000000000000000000000000..964cd88ee08fe799050009c5bc2bdc5115a73bf6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_conj_physical(Tensor self) -> Tensor +inline at::Tensor _conj_physical(const at::Tensor & self) { + return 
at::_ops::_conj_physical::call(self); +} + +// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::_conj_physical_out::call(self, out); +} +// aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::_conj_physical_out::call(self, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..454f37f28820d89d0c050e84bf1d1b4616cbb5a4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_convolution_double_backward_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _convolution_double_backward(const ::std::optional & ggI, const ::std::optional & ggW, const ::std::optional & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7d0eb4e8a39f770f134b9affaff9f59527f704db --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, ::std::optional memory_format=MemoryFormat::Contiguous); +TORCH_API at::Tensor _empty_affine_quantized(at::IntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, double scale, int64_t zero_point, ::std::optional memory_format); +TORCH_API at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, ::std::optional memory_format=MemoryFormat::Contiguous); +TORCH_API at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory, double scale, int64_t zero_point, ::std::optional memory_format); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h new file mode 100644 index 0000000000000000000000000000000000000000..c721860797c2dbc9b7b2170fab0d1b896bf5666d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h @@ -0,0 +1,101 @@ +#pragma once + +// 
@generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar::call(self, scalar); +} + +// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () +inline void _foreach_add_(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add__Scalar::call(self, scalar); +} + +// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List::call(self, other, alpha); +} + +// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () +inline void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__List::call(self, other, alpha); +} + +// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_add_ScalarList::call(self, scalars); +} + +// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () +inline void _foreach_add_(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_add__ScalarList::call(self, scalars); +} + +// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_Tensor::call(self, other, alpha); +} + +// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () +inline void 
_foreach_add_(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__Tensor::call(self, other, alpha); +} + +// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () +inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out); +} +// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () +inline void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out); +} + +// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () +inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List_out::call(self, other, alpha, out); +} +// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () +inline void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_add_List_out::call(self, other, alpha, out); +} + +// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out); +} +// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () +inline void _foreach_add_outf(at::TensorList self, at::ArrayRef scalars, at::TensorList out) { + return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out); +} + +// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () +inline void _foreach_add_out(at::TensorList 
out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out); +} +// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> () +inline void _foreach_add_outf(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..de07de4ddd2228bca220ff6229768c29a2ddec52 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _masked_softmax_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, ::std::optional dim=::std::nullopt); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7cc65cac9ac66f61cc25313ad39867510eeec447 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_multi_head_attention_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple _native_multi_head_attention_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask={}, bool need_weights=true, bool average_attn_weights=true, ::std::optional mask_type=::std::nullopt); +TORCH_API ::std::tuple _native_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const ::std::optional & mask, bool need_weights, bool average_attn_weights, ::std::optional mask_type, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..04b76176dd9471dc47ce600ac292b388b7a0ee9b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _nested_tensor_from_mask { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_mask") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor") + static at::Tensor call(const at::Tensor & t, const at::Tensor & mask, bool mask_check); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check); +}; + +struct TORCH_API _nested_tensor_from_mask_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_from_mask") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..16ec8429f9984a834ba63ad0f8b2fc819d5136e8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _scaled_dot_product_flash_attention_backward { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, double, bool, const at::Tensor &, const at::Tensor &, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_flash_attention_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)") + static ::std::tuple call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional scale); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional scale); +}; + +}} // namespace at::_ops diff --git 
a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..74ebc67068920e2aea0a097392d9a143cb1a29ab --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_broadcast_to_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _sparse_broadcast_to_copy { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_broadcast_to_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size); +}; + +struct TORCH_API _sparse_broadcast_to_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_broadcast_to_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8691072344de61b11c3f58a00befb2c5e67f630b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor _sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, at::TensorOptions options); +TORCH_API at::Tensor _sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..78a8b439b7da53f7b76d5d9d1528695abc9931de --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _upsample_nearest_exact2d(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_nearest_exact2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_outf(const at::Tensor & self, at::IntArrayRef output_size, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & out); +TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales_h=::std::nullopt, ::std::optional scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_nearest_exact2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, ::std::optional scales_h, ::std::optional scales_w, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c9a3df025bf9ddf986c60feb6557561fa2eacfba --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API ccol_indices { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ccol_indices") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ccol_indices(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft2.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft2.h new file mode 100644 index 0000000000000000000000000000000000000000..4b5ec3554dc5c8b2a85f172403bdcf5e92efbeae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifft2.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2::call(self, s.has_value() ? 
::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm); + } +} + +// aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor +inline at::Tensor fft_ifft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2::call(self, s, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ifft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2::call(self, s, dim, norm); + } +} + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::call(self, s.has_value() ? 
::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out); + } +} + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ifft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ifft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional norm=::std::nullopt) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); + } +} + +// aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & fft_ifft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ifft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional norm, at::Tensor & out) { + return at::_ops::fft_ifft2_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6be3e6d3c0dc7b6e4e482a21ba649289591d2781 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none"); +TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_native.h new file mode 100644 index 0000000000000000000000000000000000000000..29d5d2c8f5cb984d4309b8adbde622b022e7aaf4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor geometric(const at::Tensor & self, double p, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & geometric_out(const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out); +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, ::std::optional generator=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bbce0fd660060f876a65c2f9523ab28664fd2960 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0); +TORCH_API at::Tensor & huber_loss_out(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..bab54186b8a3ced89328464f8d800ba5e3d7303b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/im2col.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} +// aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::call(self, kernel_size, dilation, padding, stride, out); +} + +// aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor +inline at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8840e6f40e210e20efc16443bc127ae901101f5a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7add774680483876342f5803f760aa1c9851c794 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API index_select_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select { + using schema = at::Tensor (const at::Tensor &, int64_t, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_select(Tensor self, int dim, Tensor index) -> Tensor") + static at::Tensor call(const at::Tensor & self, int64_t dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index); +}; + +struct TORCH_API index_select_dimname_out { + using schema = at::Tensor & (const at::Tensor &, at::Dimname, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API index_select_dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::index_select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h new file mode 100644 index 0000000000000000000000000000000000000000..81cd9876809cd4eb0e397c0365a778268cf8c4e0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/isinf_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor isinf(const at::Tensor & self); +TORCH_API at::Tensor & isinf_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor isinf_sparse(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_csr(const at::Tensor & self); +TORCH_API at::Tensor isinf_sparse_meta(const at::Tensor & self); +} // namespace native +} // 
namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial.h new file mode 100644 index 0000000000000000000000000000000000000000..d9937527551ce9a95604e4706276f13454d4f4ca --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multinomial.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & multinomial_out(at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement=false, ::std::optional generator=::std::nullopt) { + return at::_ops::multinomial_out::call(self, num_samples, replacement, generator, out); +} +// aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & multinomial_outf(const at::Tensor & self, int64_t num_samples, bool replacement, ::std::optional generator, at::Tensor & out) { + return at::_ops::multinomial_out::call(self, num_samples, replacement, generator, out); +} + +// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? 
generator=None) -> Tensor +inline at::Tensor multinomial(const at::Tensor & self, int64_t num_samples, bool replacement=false, ::std::optional generator=::std::nullopt) { + return at::_ops::multinomial::call(self, num_samples, replacement, generator); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..08679281866b4d100c9606bc0cb8f277da941204 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/native_channel_shuffle_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API native_channel_shuffle { + using schema = at::Tensor (const at::Tensor &, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_channel_shuffle") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_channel_shuffle(Tensor self, SymInt groups) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymInt groups); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ne.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ne.h new file mode 100644 index 0000000000000000000000000000000000000000..59cb36f6f4f1e16fcee2bfdd938556d535a5fd6c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ne.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar_out::call(self, other, out); +} +// aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & ne_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ne_Scalar_out::call(self, other, out); +} + +// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor ne(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar::call(self, other); +} + +// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ne_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor_out::call(self, other, out); +} +// aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ne_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ne_Tensor_out::call(self, other, out); +} + +// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor ne(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor::call(self, other); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..611f2e98dc5d8fdd8e45a8a25eef7cfbdf93c3fc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_backward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor nll_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor nll_loss_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); +TORCH_API at::Tensor & nll_loss_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight); +TORCH_API at::Tensor & nll_loss_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ef594d0dbbe0ce77d09390f9ebc5a173a1c8f87f --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_ops.h @@ -0,0 +1,61 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API nuclear_norm { + using schema = at::Tensor (const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nuclear_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nuclear_norm(Tensor self, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim); +}; + +struct TORCH_API nuclear_norm_out { + using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nuclear_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out); +}; + +struct TORCH_API nuclear_norm_dim { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nuclear_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim); +}; + +struct TORCH_API nuclear_norm_dim_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::nuclear_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma.h new file mode 100644 index 0000000000000000000000000000000000000000..beef10384552010c6ff00a083d6692e2c5dfb2bf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/polygamma.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & polygamma_out(at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma_out::call(n, self, out); +} +// aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & polygamma_outf(int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::polygamma_out::call(n, self, out); +} + +// aten::polygamma(int n, Tensor self) -> Tensor +inline at::Tensor polygamma(int64_t n, const at::Tensor & self) { + return at::_ops::polygamma::call(n, self); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..10f3abe30d1824e54681fb603d101e726bfe290e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/range_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, at::TensorOptions options={}); +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}); +TORCH_API at::Tensor range(const at::Scalar & start, const at::Scalar & end, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory); +TORCH_API at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end); +TORCH_API at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ff1fb12bd549fde4bffc639a995a286fe87a655a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor reflection_pad2d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad2d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & reflection_pad2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5943043edf69bcf684e26b0f8d6aeb7c3455fcb1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index); +TORCH_API at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out); +TORCH_API at::Tensor & select_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index); +TORCH_API at::Tensor & select_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sign_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sign_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e93ccf0367d83c3eeae71196b1fa051992eba8da --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sign_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API sign { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sign") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sign(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API sign_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sign_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sign_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API sign_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sign") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sign.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/silu_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/silu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..afdf417eaba32fa0b5a80adf239cee54763c9ac6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/silu_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_silu_out : public at::meta::structured_silu { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor NestedTensor_silu(const at::Tensor & self); +TORCH_API at::Tensor & NestedTensor_silu_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsc_tensor_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsc_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5f778681f98f650078679dae6c22c254858e5f56 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsc_tensor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional dtype={}, ::std::optional layout={}, 
::std::optional device={}, ::std::optional pin_memory={}); +TORCH_API at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, ::std::optional dtype={}, ::std::optional layout={}, ::std::optional device={}, ::std::optional pin_memory={}); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1067bdad16589995b51a1e6b3c5ad2024fbfb766 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_bessel_j0(const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & special_bessel_j0_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..40d3fd1a0ea358b4ab6626f3f21c5f2ceabaf8ab --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a89b80d7d030d2e10abebac15462bb8d9d786f0e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::vector split_with_sizes(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0); +TORCH_API ::std::vector split_with_sizes_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h new file mode 100644 index 0000000000000000000000000000000000000000..b540322d4810c3c7859de27c9ed710ac610a895a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] +inline ::std::vector unbind(const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_int::call(self, dim); +} + +// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] +inline ::std::vector unbind(const at::Tensor & self, at::Dimname dim) { + return at::_ops::unbind_Dimname::call(self, dim); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9a9b43287db06723efe7ececb9a7b716e41dc677 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unbind_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API unbind_copy_int { + using schema = ::std::vector (const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unbind_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unbind_copy.int(Tensor self, int dim=0) -> Tensor[]") + static ::std::vector call(const at::Tensor & self, int64_t dim); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim); +}; + +struct TORCH_API unbind_copy_int_out { + using schema = void (const at::Tensor &, int64_t, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unbind_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()") + static void call(const at::Tensor & self, int64_t dim, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9c1fac4b58f54721fccdd1678cf1a57067388a4a --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_complex_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor view_as_complex_copy(const at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at