diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca78dd22f2e690bc5115565e9e0c11b67929031c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__init__.py @@ -0,0 +1,16 @@ +from .extensions import ( + extension_list, + known_extensions, + FileExtension, + video_extensions, +) +from .plugins import known_plugins, PluginConfig + +__all__ = [ + "known_plugins", + "PluginConfig", + "extension_list", + "known_extensions", + "FileExtension", + "video_extensions", +] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64504d2b5490f6113e24c6b13cf7002089f11dcb Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59ef58c5f1bc24f7bae1fb2b640a76b0c55d3151 Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc differ diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/extensions.py b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..00c716246ad5683dfbf505c9347561ab5c13bd1c --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/extensions.py @@ -0,0 +1,2002 @@ +""" +A set of 
objects representing each file extension recognized by ImageIO. If an +extension is not listed here it is still supported, as long as there exists a +supporting backend. + +""" + + +class FileExtension: + """File Extension Metadata + + This class holds information about a image file format associated with a + given extension. This information is used to track plugins that are known to + be able to handle a particular format. It also contains additional + information about a format, which is used when creating the supported format + docs. + + Plugins known to be able to handle this format are ordered by a ``priority`` + list. This list is used to determine the ideal plugin to use when choosing a + plugin based on file extension. + + Parameters + ---------- + extension : str + The name of the extension including the initial dot, e.g. ".png". + priority : List + A list of plugin names (entries in config.known_plugins) that can handle + this format. The position of a plugin expresses a preference, e.g. + ["plugin1", "plugin2"] indicates that, if available, plugin1 should be + preferred over plugin2 when handling a request related to this format. + name : str + The full name of the format. + description : str + A description of the format. + external_link : str + A link to further information about the format. Typically, the format's + specification. + volume_support : str + If True, the format/extension supports volumetric image data. 
+ + Examples + -------- + >>> FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ) + + """ + + def __init__( + self, + *, + extension, + priority, + name=None, + description=None, + external_link=None, + volume_support=False + ): + self.extension = extension + self.priority = priority + self.name = name + self.description = description + self.external_link = external_link + self.default_priority = priority.copy() + self.volume_support = volume_support + + def reset(self): + self.priority = self.default_priority.copy() + + +extension_list = [ + FileExtension( + name="Hasselblad raw", + extension=".3fr", + priority=["RAW-FI"], + ), + FileExtension( + name="Sony alpha", + extension=".arw", + priority=["RAW-FI"], + ), + FileExtension( + name="Animated Portable Network Graphics", + external_link="https://en.wikipedia.org/wiki/APNG", + extension=".apng", + priority=["pillow", "pyav"], + ), + FileExtension( + name="Audio Video Interleave", + extension=".avi", + priority=["FFMPEG"], + ), + FileExtension( + name="Casio raw format", + extension=".bay", + priority=["RAW-FI"], + ), + FileExtension( + extension=".blp", + priority=["pillow"], + ), + FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + name="Device-Independent Bitmap", + extension=".dip", + priority=["opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + name="Re-Volt mipmap", + extension=".bmq", + priority=["RAW-FI"], + ), + FileExtension( + name="Binary Structured Data Format", + extension=".bsdf", + priority=["BSDF"], + external_link="http://bsdf.io/", + ), + FileExtension( + name="Binary Universal Form for the Representation of meteorological data", + extension=".bufr", + priority=["pillow", 
"BUFR-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".bw", + priority=["pillow", "SGI-PIL", "SGI-FI"], + ), + FileExtension( + name="Scirra Construct", + extension=".cap", + priority=["RAW-FI"], + ), + FileExtension( + name="AMETEK High Speed Camera Format", + extension=".cine", + priority=["RAW-FI"], + external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution", + ), + FileExtension(extension=".cr2", priority=["RAW-FI"]), + FileExtension( + extension=".crw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".cs1", + priority=["RAW-FI"], + ), + FileExtension( + name="Computerized Tomography", + extension=".ct", + priority=["DICOM"], + ), + FileExtension( + name="Windows Cursor Icons", + extension=".cur", + priority=["pillow", "CUR-PIL"], + ), + FileExtension( + name="Dr. Halo", + extension=".cut", + priority=["CUT-FI"], + ), + FileExtension( + extension=".dc2", + priority=["RAW-FI"], + ), + FileExtension( + name="DICOM file format", + extension=".dcm", + priority=["DICOM", "ITK"], + ), + FileExtension( + extension=".dcr", + priority=["RAW-FI"], + ), + FileExtension( + name="Intel DCX", + extension=".dcx", + priority=["pillow", "DCX-PIL"], + ), + FileExtension( + name="DirectX Texture Container", + extension=".dds", + priority=["pillow", "DDS-FI", "DDS-PIL"], + ), + FileExtension( + name="Windows Bitmap", + extension=".dib", + priority=["pillow", "DIB-PIL"], + ), + FileExtension( + name="DICOM file format", + extension=".dicom", + priority=["ITK"], + ), + FileExtension( + extension=".dng", + priority=["RAW-FI"], + ), + FileExtension( + extension=".drf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".dsc", + priority=["RAW-FI"], + ), + FileExtension( + name="Enhanced Compression Wavelet", + extension=".ecw", + priority=["GDAL"], + ), + FileExtension( + name="Windows Metafile", + extension=".emf", + 
priority=["pillow", "WMF-PIL"], + ), + FileExtension( + name="Encapsulated Postscript", + extension=".eps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + extension=".erf", + priority=["RAW-FI"], + ), + FileExtension( + name="OpenEXR", + extension=".exr", + external_link="https://openexr.readthedocs.io/en/latest/", + priority=["EXR-FI", "pyav", "opencv"], + ), + FileExtension( + extension=".fff", + priority=["RAW-FI"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fit", + priority=["pillow", "FITS-PIL", "FITS"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fits", + priority=["pillow", "FITS-PIL", "FITS", "pyav"], + ), + FileExtension( + name="Autodesk FLC Animation", + extension=".flc", + priority=["pillow", "FLI-PIL"], + ), + FileExtension( + name="Autodesk FLI Animation", + extension=".fli", + priority=["pillow", "FLI-PIL"], + ), + FileExtension( + name="Kodak FlashPix", + extension=".fpx", + priority=["pillow", "FPX-PIL"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftc", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fts", + priority=["FITS"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftu", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fz", + priority=["FITS"], + ), + FileExtension( + name="Raw fax format CCITT G.3", + extension=".g3", + priority=["G3-FI"], + ), + FileExtension( + name="GIMP brush file", + extension=".gbr", + priority=["pillow", "GBR-PIL"], + ), + FileExtension( + name="Grassroots DICOM", + extension=".gdcm", + priority=["ITK"], + ), + FileExtension( + name="Graphics Interchange Format", + extension=".gif", + priority=["pillow", "GIF-PIL", "pyav"], + ), + FileExtension( + name="UMDS GIPL", + extension=".gipl", + 
priority=["ITK"], + ), + FileExtension( + name="gridded meteorological data", + extension=".grib", + priority=["pillow", "GRIB-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".h5", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf5", + priority=["ITK"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".hdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="High Dynamic Range Image", + extension=".hdr", + priority=["HDR-FI", "ITK", "opencv"], + ), + FileExtension( + extension=".ia", + priority=["RAW-FI"], + ), + FileExtension( + extension=".icb", + priority=["pillow"], + ), + FileExtension( + name="Mac OS Icon File", + extension=".icns", + priority=["pillow", "ICNS-PIL"], + ), + FileExtension( + name="Windows Icon File", + extension=".ico", + priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".iff", + priority=["IFF-FI"], + ), + FileExtension( + name="IPTC/NAA", + extension=".iim", + priority=["pillow", "IPTC-PIL"], + ), + FileExtension( + extension=".iiq", + priority=["RAW-FI"], + ), + FileExtension( + name="IFUNC Image Memory", + extension=".im", + priority=["pillow", "IM-PIL"], + ), + FileExtension( + extension=".img", + priority=["ITK", "GDAL"], + ), + FileExtension( + extension=".img.gz", + priority=["ITK"], + ), + FileExtension( + name="IM Tools", + extension=".IMT", + priority=["pillow", "IMT-PIL"], + ), + FileExtension( + name="Image Processing Lab", + extension=".ipl", + priority=["ITK"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2c", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2k", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG", + 
extension=".jfif", + priority=["pillow", "JPEG-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jif", + priority=["JPEG-FI"], + ), + FileExtension( + name="JPEG Network Graphics", + extension=".jng", + priority=["JNG-FI"], + ), + FileExtension( + name="JPEG 2000", + extension=".jp2", + priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpc", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jpe", + priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpeg", + priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpf", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpg", + priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpx", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".jxr", + priority=["JPEG-XR-FI"], + ), + FileExtension( + extension=".k25", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kc2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kdc", + priority=["RAW-FI"], + ), + FileExtension( + name="C64 Koala Graphics", + extension=".koa", + priority=["KOALA-FI"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".lbm", + priority=["IFF-FI"], + ), + FileExtension( + name="Lytro F01", + extension=".lfp", + priority=["LYTRO-LFP"], + ), + FileExtension( + name="Lytro Illum", + extension=".lfr", + priority=["LYTRO-LFR"], + ), + FileExtension( + name="ZEISS LSM", + extension=".lsm", + priority=["tifffile", "ITK", "TIFF"], + ), + FileExtension( + name="McIdas area file", + extension=".MCIDAS", + priority=["pillow", "MCIDAS-PIL"], + 
external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html", + ), + FileExtension( + extension=".mdc", + priority=["RAW-FI"], + ), + FileExtension( + extension=".mef", + priority=["RAW-FI"], + ), + FileExtension( + name="FreeSurfer File Format", + extension=".mgh", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage", + extension=".mha", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage Header", + extension=".mhd", + priority=["ITK"], + ), + FileExtension( + name="Microsoft Image Composer", + extension=".mic", + priority=["pillow", "MIC-PIL"], + ), + FileExtension( + name="Matroska Multimedia Container", + extension=".mkv", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Medical Imaging NetCDF", + extension=".mnc", + priority=["ITK"], + ), + FileExtension( + name="Medical Imaging NetCDF 2", + extension=".mnc2", + priority=["ITK"], + ), + FileExtension( + name="Leaf Raw Image Format", + extension=".mos", + priority=["RAW-FI"], + ), + FileExtension( + name="QuickTime File Format", + extension=".mov", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-4 Part 14", + extension=".mp4", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-1 Moving Picture Experts Group", + extension=".mpeg", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Moving Picture Experts Group", + extension=".mpg", + priority=["pillow", "FFMPEG", "pyav"], + ), + FileExtension( + name="JPEG Multi-Picture Format", + extension=".mpo", + priority=["pillow", "MPO-PIL"], + ), + FileExtension( + name="Magnetic resonance imaging", + extension=".mri", + priority=["DICOM"], + ), + FileExtension( + extension=".mrw", + priority=["RAW-FI"], + ), + FileExtension( + name="Windows Paint", + extension=".msp", + priority=["pillow", "MSP-PIL"], + ), + FileExtension( + extension=".nef", + priority=["RAW-FI", "rawpy"], + ), + FileExtension( + extension=".nhdr", + priority=["ITK"], + ), + FileExtension( + 
extension=".nia", + priority=["ITK"], + ), + FileExtension( + extension=".nii", + priority=["ITK"], + ), + FileExtension( + name="nii.gz", + extension=".nii.gz", + priority=["ITK"], + ), + FileExtension( + name="Numpy Array", + extension=".npz", + priority=["NPZ"], + volume_support=True, + ), + FileExtension( + extension=".nrrd", + priority=["ITK"], + ), + FileExtension( + extension=".nrw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".orf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".palm", + priority=["pillow"], + ), + FileExtension( + name="Portable Bitmap", + extension=".pbm", + priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Kodak PhotoCD", + extension=".pcd", + priority=["pillow", "PCD-FI", "PCD-PIL"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pct", + priority=["PICT-FI"], + ), + FileExtension( + name="Zsoft Paintbrush", + extension=".PCX", + priority=["pillow", "PCX-FI", "PCX-PIL"], + ), + FileExtension( + extension=".pdf", + priority=["pillow"], + ), + FileExtension( + extension=".pef", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pfm", + priority=["PFM-FI", "pyav", "opencv"], + ), + FileExtension( + name="Portable Greymap", + extension=".pgm", + priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pic", + priority=["PICT-FI", "ITK", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pict", + priority=["PICT-FI"], + ), + FileExtension( + name="Portable Network Graphics", + extension=".png", + priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"], + ), + FileExtension( + name="Portable Image Format", + extension=".pnm", + priority=["pillow", "opencv"], + ), + FileExtension( + name="Pbmplus image", + extension=".ppm", + priority=["pillow", "PPM-PIL", "pyav"], + ), + FileExtension( + name="Pbmplus image", + extension=".pbm", + priority=["pillow", "PPM-PIL", 
"PPM-FI"], + ), + FileExtension( + name="Portable image format", + extension=".pxm", + priority=["opencv"], + ), + FileExtension( + name="Portable Pixelmap (ASCII)", + extension=".ppm", + priority=["PPM-FI", "opencv"], + ), + FileExtension( + name="Portable Pixelmap (Raw)", + extension=".ppm", + priority=["PPMRAW-FI"], + ), + FileExtension( + name="Ghostscript", + extension=".ps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + name="Adope Photoshop 2.5 and 3.0", + extension=".psd", + priority=["pillow", "PSD-PIL", "PSD-FI"], + ), + FileExtension( + extension=".ptx", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pxn", + priority=["RAW-FI"], + ), + FileExtension( + name="PIXAR raster image", + extension=".pxr", + priority=["pillow", "PIXAR-PIL"], + ), + FileExtension( + extension=".qtk", + priority=["RAW-FI"], + ), + FileExtension( + extension=".raf", + priority=["RAW-FI"], + ), + FileExtension( + name="Sun Raster File", + extension=".ras", + priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"], + ), + FileExtension( + name="Sun Raster File", + extension=".sr", + priority=["opencv"], + ), + FileExtension( + extension=".raw", + priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"], + ), + FileExtension( + extension=".rdc", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgb", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgba", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + extension=".rw2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwl", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwz", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".sgi", + priority=["pillow", "SGI-PIL", "pyav"], + ), + FileExtension( + name="SPE File Format", + extension=".spe", + priority=["SPE"], + ), + FileExtension( + extension=".SPIDER", + priority=["pillow", 
"SPIDER-PIL"], + ), + FileExtension( + extension=".sr2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".sti", + priority=["RAW-FI"], + ), + FileExtension( + extension=".stk", + priority=["tifffile", "TIFF"], + ), + FileExtension( + name="ShockWave Flash", + extension=".swf", + priority=["SWF", "pyav"], + ), + FileExtension( + name="Truevision TGA", + extension=".targa", + priority=["pillow", "TARGA-FI"], + ), + FileExtension( + name="Truevision TGA", + extension=".tga", + priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"], + ), + FileExtension( + name="Tagged Image File", + extension=".tif", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + name="Tagged Image File Format", + extension=".tiff", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + extension=".vda", + priority=["pillow"], + ), + FileExtension( + extension=".vst", + priority=["pillow"], + ), + FileExtension( + extension=".vtk", + priority=["ITK"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wap", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbm", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbmp", + priority=["WBMP-FI"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".wdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="Matroska", + extension=".webm", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Google WebP", + extension=".webp", + priority=["pillow", "WEBP-FI", "pyav", "opencv"], + ), + FileExtension( + name="Windows Meta File", + extension=".wmf", + priority=["pillow", 
"WMF-PIL"], + ), + FileExtension( + name="Windows Media Video", + extension=".wmv", + priority=["FFMPEG"], + ), + FileExtension( + name="X11 Bitmap", + extension=".xbm", + priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"], + ), + FileExtension( + name="X11 Pixel Map", + extension=".xpm", + priority=["pillow", "XPM-PIL", "XPM-FI"], + ), + FileExtension( + name="Thumbnail Image", + extension=".XVTHUMB", + priority=["pillow", "XVTHUMB-PIL"], + ), + FileExtension( + extension=".dpx", + priority=["pyav"], + ), + FileExtension( + extension=".im1", + priority=["pyav"], + ), + FileExtension( + extension=".im24", + priority=["pyav"], + ), + FileExtension( + extension=".im8", + priority=["pyav"], + ), + FileExtension( + extension=".jls", + priority=["pyav"], + ), + FileExtension( + extension=".ljpg", + priority=["pyav"], + ), + FileExtension( + extension=".pam", + priority=["pyav"], + ), + FileExtension( + extension=".pcx", + priority=["pyav"], + ), + FileExtension( + extension=".pgmyuv", + priority=["pyav"], + ), + FileExtension( + extension=".pix", + priority=["pyav"], + ), + FileExtension( + extension=".ppm", + priority=["pyav"], + ), + FileExtension( + extension=".rs", + priority=["pyav"], + ), + FileExtension( + extension=".sun", + priority=["pyav"], + ), + FileExtension( + extension=".sunras", + priority=["pyav"], + ), + FileExtension( + extension=".xface", + priority=["pyav"], + ), + FileExtension( + extension=".xwd", + priority=["pyav"], + ), + FileExtension( + extension=".y", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".isma", + 
priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GPP AMR", + extension=".amr", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".A64", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".a64", + priority=["pyav"], + ), + FileExtension( + name="Adobe Filmstrip", + extension=".flm", + priority=["pyav"], + ), + FileExtension( + name="AMV", + extension=".amv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming 
Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="AV1 Annex B", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AV1 low overhead OBU", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AVI (Audio Video Interleaved)", + extension=".avi", + priority=["pyav"], + ), + FileExtension( + name="AVR (Audio Visual Research)", + extension=".avr", + priority=["pyav"], + ), + FileExtension( + name="Beam Software SIFF", + extension=".vb", + priority=["pyav"], + ), + FileExtension( + name="CD Graphics", + extension=".cdg", + priority=["pyav"], + ), + FileExtension( + name="Commodore CDXL video", + extension=".cdxl", + priority=["pyav"], + ), + FileExtension( + name="Commodore CDXL video", + extension=".xl", + priority=["pyav"], + ), + FileExtension( + name="DASH Muxer", + extension=".mpd", + priority=["pyav"], + ), + FileExtension( + name="Digital Pictures SGA", + extension=".sga", + priority=["pyav"], + ), + FileExtension( + name="Discworld II BMV", + extension=".bmv", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dif", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dv", + priority=["pyav"], + ), + FileExtension( + name="F4V Adobe Flash Video", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="GXF (General eXchange Format)", + extension=".gxf", + priority=["pyav"], + ), + FileExtension( + name="iCE Draw File", + extension=".idf", + priority=["pyav"], + ), + FileExtension( + 
name="IFV CCTV DVR", + extension=".ifv", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="IVR (Internet Video Recording)", + extension=".ivr", + priority=["pyav"], + ), + FileExtension( + name="Konami PS2 SVAG", + extension=".svag", + priority=["pyav"], + ), + FileExtension( + name="KUX (YouKu)", + extension=".kux", + priority=["pyav"], + ), + FileExtension( + name="live RTMP FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="Loki SDL MJPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="LVF", + extension=".lvf", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mks", + priority=["pyav"], + ), + FileExtension( + name="Microsoft XMV", + extension=".xmv", + priority=["pyav"], + ), + FileExtension( + name="MIME multipart JPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MODS", + extension=".mods", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MOFLEX", + extension=".moflex", + priority=["pyav"], + ), + FileExtension( + name="Motion Pixels MVI", + extension=".mvi", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ism", + 
priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (DVD VOB)", + extension=".dvd", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (SVCD)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (VOB)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2t", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2ts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".mts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".ts", + priority=["pyav"], + ), + FileExtension( + name="Musepack", + extension=".mpc", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format) Operational Pattern Atom", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format)", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MxPEG clip", + extension=".mxg", + priority=["pyav"], + ), + FileExtension( + name="NC camera feed", + extension=".v", + priority=["pyav"], + ), + FileExtension( + name="NUT", + extension=".nut", + priority=["pyav"], + ), + FileExtension( + name="Ogg Video", + extension=".ogv", + priority=["pyav"], + ), + FileExtension( + name="Ogg", + extension=".ogg", + 
priority=["pyav"], + ), + FileExtension( + name="On2 IVF", + extension=".ivf", + priority=["pyav"], + ), + FileExtension( + name="PSP MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="Psygnosis YOP", + extension=".yop", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs2", + priority=["pyav"], + ), + FileExtension( + name="raw AVS3-P2/IEEE1857.10", + extension=".avs3", + priority=["pyav"], + ), + FileExtension( + name="raw Chinese AVS (Audio Video Standard) video", + extension=".cavs", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".drc", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".vc2", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhd", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhr", + priority=["pyav"], + ), + FileExtension( + name="raw 
GSM", + extension=".gsm", + priority=["pyav"], + ), + FileExtension( + name="raw H.261", + extension=".h261", + priority=["pyav"], + ), + FileExtension( + name="raw H.263", + extension=".h263", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".264", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".avc", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".h264", + priority=["pyav", "FFMPEG"], + ), + FileExtension( + name="raw H.264 video", + extension=".h26l", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".h265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".hevc", + priority=["pyav"], + ), + FileExtension( + name="raw id RoQ", + extension=".roq", + priority=["pyav"], + ), + FileExtension( + name="raw Ingenient MJPEG", + extension=".cgi", + priority=["pyav"], + ), + FileExtension( + name="raw IPU Video", + extension=".ipu", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG 2000 video", + extension=".j2k", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mpo", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".m1v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-2 video", + extension=".m2v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-4 video", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="raw VC-1 video", + extension=".vc1", + 
priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".cif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".qcif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".rgb", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".yuv", + priority=["pyav"], + ), + FileExtension( + name="RealMedia", + extension=".rm", + priority=["pyav"], + ), + FileExtension( + name="SDR2", + extension=".sdr2", + priority=["pyav"], + ), + FileExtension( + name="Sega FILM / CPK", + extension=".cpk", + priority=["pyav"], + ), + FileExtension( + name="SER (Simple uncompressed video format for astronomical capturing)", + extension=".ser", + priority=["pyav"], + ), + FileExtension( + name="Simbiosis Interactive IMX", + extension=".imx", + priority=["pyav"], + ), + FileExtension( + name="Square SVS", + extension=".svs", + priority=["tifffile", "pyav"], + ), + FileExtension( + name="TiVo TY Stream", + extension=".ty", + priority=["pyav"], + ), + FileExtension( + name="TiVo TY Stream", + extension=".ty+", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".v210", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".yuv10", + priority=["pyav"], + ), + FileExtension( + name="VC-1 test bitstream", + extension=".rcv", + priority=["pyav"], + ), + FileExtension( + name="Video CCTV DAT", + extension=".dat", + priority=["pyav"], + ), + FileExtension( + name="Video DAV", + extension=".dav", + priority=["pyav"], + ), + FileExtension( + name="Vivo", + extension=".viv", + priority=["pyav"], + ), + FileExtension( + name="WebM Chunk Muxer", + extension=".chk", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mks", + priority=["pyav"], + ), + 
FileExtension( + name="Windows Television (WTV)", + extension=".wtv", + priority=["pyav"], + ), + FileExtension( + name="Xilam DERF", + extension=".adp", + priority=["pyav"], + ), + FileExtension( + name="YUV4MPEG pipe", + extension=".y4m", + priority=["pyav"], + ), + FileExtension( + extension=".qpi", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".pcoraw", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".rec", + priority=["tifffile"], + ), + FileExtension( + name="Perkin Elmer Vectra", + extension=".qptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptif", + priority=["tifffile"], + ), + FileExtension( + name="Opticks Gel", + extension=".gel", + priority=["tifffile"], + ), + FileExtension( + name="Zoomify Image Format", + extension=".zif", + priority=["tifffile"], + ), + FileExtension( + name="Hamamatsu Slide Scanner", + extension=".ndpi", + priority=["tifffile"], + ), + FileExtension( + name="Roche Digital Pathology", + extension=".bif", + priority=["tifffile"], + ), + FileExtension( + extension=".tf8", + priority=["tifffile"], + ), + FileExtension( + extension=".btf", + priority=["tifffile"], + ), + FileExtension( + name="High Efficiency Image File Format", + extension=".heic", + priority=["pillow"], + ), + FileExtension( + name="AV1 Image File Format", + extension=".avif", + priority=["pillow"], + ), +] +extension_list.sort(key=lambda x: x.extension) + + +known_extensions = dict() +for ext in extension_list: + if ext.extension not in known_extensions: + known_extensions[ext.extension] = list() + known_extensions[ext.extension].append(ext) + +extension_list = [ext for ext_list in known_extensions.values() for ext in ext_list] + +_video_extension_strings = [ + ".264", + ".265", + ".3g2", + ".3gp", + ".a64", + ".A64", + ".adp", + ".amr", + ".amv", + 
".asf", + ".avc", + ".avi", + ".avr", + ".avs", + ".avs2", + ".avs3", + ".bmv", + ".cavs", + ".cdg", + ".cdxl", + ".cgi", + ".chk", + ".cif", + ".cpk", + ".dat", + ".dav", + ".dif", + ".dnxhd", + ".dnxhr", + ".drc", + ".dv", + ".dvd", + ".f4v", + ".flm", + ".flv", + ".gsm", + ".gxf", + ".h261", + ".h263", + ".h264", + ".h265", + ".h26l", + ".hevc", + ".idf", + ".ifv", + ".imx", + ".ipu", + ".ism", + ".isma", + ".ismv", + ".ivf", + ".ivr", + ".j2k", + ".kux", + ".lvf", + ".m1v", + ".m2t", + ".m2ts", + ".m2v", + ".m4a", + ".m4b", + ".m4v", + ".mj2", + ".mjpeg", + ".mjpg", + ".mk3d", + ".mka", + ".mks", + ".mkv", + ".mods", + ".moflex", + ".mov", + ".mp4", + ".mpc", + ".mpd", + ".mpeg", + ".mpg", + ".mpo", + ".mts", + ".mvi", + ".mxf", + ".mxg", + ".nut", + ".obu", + ".ogg", + ".ogv", + ".psp", + ".qcif", + ".rcv", + ".rgb", + ".rm", + ".roq", + ".sdr2", + ".ser", + ".sga", + ".svag", + ".svs", + ".ts", + ".ty", + ".ty+", + ".v", + ".v210", + ".vb", + ".vc1", + ".vc2", + ".viv", + ".vob", + ".webm", + ".wmv", + ".wtv", + ".xl", + ".xmv", + ".y4m", + ".yop", + ".yuv", + ".yuv10", +] +video_extensions = list() +for ext_string in _video_extension_strings: + formats = known_extensions[ext_string] + video_extensions.append(formats[0]) +video_extensions.sort(key=lambda x: x.extension) diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/plugins.py b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..261dcfb17794fa0695f3e4393dfe9f8ebc72d9bd --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/imageio/config/plugins.py @@ -0,0 +1,782 @@ +import importlib + +from ..core.legacy_plugin_wrapper import LegacyPlugin + + +class PluginConfig: + """Plugin Configuration Metadata + + This class holds the information needed to lazy-import plugins. + + Parameters + ---------- + name : str + The name of the plugin. 
+ class_name : str + The name of the plugin class inside the plugin module. + module_name : str + The name of the module/package from which to import the plugin. + is_legacy : bool + If True, this plugin is a v2 plugin and will be wrapped in a + LegacyPlugin. Default: False. + package_name : str + If the given module name points to a relative module, then the package + name determines the package it is relative to. + install_name : str + The name of the optional dependency that can be used to install this + plugin if it is missing. + legacy_args : Dict + A dictionary of kwargs to pass to the v2 plugin (Format) upon construction. + + Examples + -------- + >>> PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, + ) + >>> PluginConfig( + name="pillow", + class_name="PillowPlugin", + module_name="imageio.plugins.pillow" + ) + + """ + + def __init__( + self, + name, + class_name, + module_name, + *, + is_legacy=False, + package_name=None, + install_name=None, + legacy_args=None, + ): + legacy_args = legacy_args or dict() + + self.name = name + self.class_name = class_name + self.module_name = module_name + self.package_name = package_name + + self.is_legacy = is_legacy + self.install_name = install_name or self.name + self.legacy_args = {"name": name, "description": "A legacy plugin"} + self.legacy_args.update(legacy_args) + + @property + def format(self): + """For backwards compatibility with FormatManager + + Delete when migrating to v3 + """ + if not self.is_legacy: + raise RuntimeError("Can only get format for legacy plugins.") + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + return clazz(**self.legacy_args) + + @property + def plugin_class(self): + """Get the plugin class (import if needed) + + 
Returns + ------- + plugin_class : Any + The class that can be used to instantiate plugins. + + """ + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + + if self.is_legacy: + legacy_plugin = clazz(**self.legacy_args) + + def partial_legacy_plugin(request): + return LegacyPlugin(request, legacy_plugin) + + clazz = partial_legacy_plugin + + return clazz + + +known_plugins = dict() +known_plugins["pillow"] = PluginConfig( + name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow" +) +known_plugins["pyav"] = PluginConfig( + name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav" +) +known_plugins["opencv"] = PluginConfig( + name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv" +) +known_plugins["tifffile"] = PluginConfig( + name="tifffile", + class_name="TifffilePlugin", + module_name="imageio.plugins.tifffile_v3", +) +known_plugins["SPE"] = PluginConfig( + name="spe", class_name="SpePlugin", module_name="imageio.plugins.spe" +) +known_plugins["rawpy"] = PluginConfig( + name="rawpy", class_name="RawPyPlugin", module_name="imageio.plugins.rawpy" +) + +# Legacy plugins +# ============== +# +# Which are partly registered by format, partly by plugin, and partly by a mix +# of both. We keep the naming here for backwards compatibility. +# In v3 this should become a single entry per plugin named after the plugin +# We can choose extension-specific priority in ``config.extensions``. +# +# Note: Since python 3.7 order of insertion determines the order of dict().keys() +# This means that the order here determines the order by which plugins are +# checked during the full fallback search. We don't advertise this downstream, +# but it could be a useful thing to keep in mind to choose a sensible default +# search order. 
+ +known_plugins["TIFF"] = PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, +) + +# PILLOW plugin formats (legacy) +PILLOW_FORMATS = [ + ("BMP", "Windows Bitmap", ".bmp", "PillowFormat"), + ("BUFR", "BUFR", ".bufr", "PillowFormat"), + ("CUR", "Windows Cursor", ".cur", "PillowFormat"), + ("DCX", "Intel DCX", ".dcx", "PillowFormat"), + ("DDS", "DirectDraw Surface", ".dds", "PillowFormat"), + ("DIB", "Windows Bitmap", "", "PillowFormat"), + ("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"), + ("FITS", "FITS", ".fit .fits", "PillowFormat"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"), + ("FPX", "FlashPix", ".fpx", "PillowFormat"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"), + ("GBR", "GIMP brush file", ".gbr", "PillowFormat"), + ("GIF", "Compuserve GIF", ".gif", "GIFFormat"), + ("GRIB", "GRIB", ".grib", "PillowFormat"), + ("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"), + ("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"), + ("ICO", "Windows Icon", ".ico", "PillowFormat"), + ("IM", "IFUNC Image Memory", ".im", "PillowFormat"), + ("IMT", "IM Tools", "", "PillowFormat"), + ("IPTC", "IPTC/NAA", ".iim", "PillowFormat"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"), + ( + "JPEG2000", + "JPEG 2000 (ISO 15444)", + ".jp2 .j2k .jpc .jpf .jpx .j2c", + "JPEG2000Format", + ), + ("MCIDAS", "McIdas area file", "", "PillowFormat"), + ("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"), + # skipped in legacy pillow + # ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"), + ("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"), + ("MSP", "Windows Paint", ".msp", "PillowFormat"), + ("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"), + ("PCX", "Paintbrush", ".pcx", "PillowFormat"), + 
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"), + ("PNG", "Portable network graphics", ".png", "PNGFormat"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"), + ("PSD", "Adobe Photoshop", ".psd", "PillowFormat"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"), + ("SPIDER", "Spider 2D image", "", "PillowFormat"), + ("SUN", "Sun Raster File", ".ras", "PillowFormat"), + ("TGA", "Targa", ".tga", "PillowFormat"), + ("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"), + ("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"), + ("XBM", "X11 Bitmap", ".xbm", "PillowFormat"), + ("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"), + ("XVTHUMB", "XV thumbnail image", "", "PillowFormat"), +] +for id, summary, ext, class_name in PILLOW_FORMATS: + config = PluginConfig( + name=id.upper() + "-PIL", + class_name=class_name, + module_name="imageio.plugins.pillow_legacy", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": summary + " via Pillow", + "extensions": ext, + "modes": "iI" if class_name == "GIFFormat" else "i", + "plugin_id": id, + }, + ) + known_plugins[config.name] = config + +known_plugins["FFMPEG"] = PluginConfig( + name="FFMPEG", + class_name="FfmpegFormat", + module_name="imageio.plugins.ffmpeg", + is_legacy=True, + install_name="ffmpeg", + legacy_args={ + "description": "Many video formats and cameras (via ffmpeg)", + "extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264", + "modes": "I", + }, +) + +known_plugins["BSDF"] = PluginConfig( + name="BSDF", + class_name="BsdfFormat", + module_name="imageio.plugins.bsdf", + is_legacy=True, + install_name="bsdf", + legacy_args={ + "description": "Format based on the Binary Structured Data Format", + "extensions": ".bsdf", + "modes": "iIvV", + }, +) + +known_plugins["DICOM"] = PluginConfig( + name="DICOM", + class_name="DicomFormat", + module_name="imageio.plugins.dicom", + is_legacy=True, + install_name="dicom", + legacy_args={ + "description": 
"Digital Imaging and Communications in Medicine", + "extensions": ".dcm .ct .mri", + "modes": "iIvV", + }, +) + +known_plugins["FEI"] = PluginConfig( + name="FEI", + class_name="FEISEMFormat", + module_name="imageio.plugins.feisem", + is_legacy=True, + install_name="feisem", + legacy_args={ + "description": "FEI-SEM TIFF format", + "extensions": [".tif", ".tiff"], + "modes": "iv", + }, +) + +known_plugins["FITS"] = PluginConfig( + name="FITS", + class_name="FitsFormat", + module_name="imageio.plugins.fits", + is_legacy=True, + install_name="fits", + legacy_args={ + "description": "Flexible Image Transport System (FITS) format", + "extensions": ".fits .fit .fts .fz", + "modes": "iIvV", + }, +) + +known_plugins["GDAL"] = PluginConfig( + name="GDAL", + class_name="GdalFormat", + module_name="imageio.plugins.gdal", + is_legacy=True, + install_name="gdal", + legacy_args={ + "description": "Geospatial Data Abstraction Library", + "extensions": ".tiff .tif .img .ecw .jpg .jpeg", + "modes": "iIvV", + }, +) + +known_plugins["ITK"] = PluginConfig( + name="ITK", + class_name="ItkFormat", + module_name="imageio.plugins.simpleitk", + is_legacy=True, + install_name="simpleitk", + legacy_args={ + "description": "Insight Segmentation and Registration Toolkit (ITK) format", + "extensions": " ".join( + ( + ".gipl", + ".ipl", + ".mha", + ".mhd", + ".nhdr", + ".nia", + ".hdr", + ".nrrd", + ".nii", + ".nii.gz", + ".img", + ".img.gz", + ".vtk", + ".hdf5", + ".lsm", + ".mnc", + ".mnc2", + ".mgh", + ".mnc", + ".pic", + ".bmp", + ".jpeg", + ".jpg", + ".png", + ".tiff", + ".tif", + ".dicom", + ".dcm", + ".gdcm", + ) + ), + "modes": "iIvV", + }, +) + +known_plugins["NPZ"] = PluginConfig( + name="NPZ", + class_name="NpzFormat", + module_name="imageio.plugins.npz", + is_legacy=True, + install_name="numpy", + legacy_args={ + "description": "Numpy's compressed array format", + "extensions": ".npz", + "modes": "iIvV", + }, +) + +known_plugins["SWF"] = PluginConfig( + name="SWF", + 
class_name="SWFFormat", + module_name="imageio.plugins.swf", + is_legacy=True, + install_name="swf", + legacy_args={ + "description": "Shockwave flash", + "extensions": ".swf", + "modes": "I", + }, +) + +known_plugins["SCREENGRAB"] = PluginConfig( + name="SCREENGRAB", + class_name="ScreenGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab screenshots (Windows and OS X only)", + "extensions": [], + "modes": "i", + }, +) + +known_plugins["CLIPBOARDGRAB"] = PluginConfig( + name="CLIPBOARDGRAB", + class_name="ClipboardGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab from clipboard (Windows only)", + "extensions": [], + "modes": "i", + }, +) + +# LYTRO plugin (legacy) +lytro_formats = [ + ("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"), + ( + "lytro-illum-raw", + "Lytro Illum raw image file", + ".raw", + "i", + "LytroIllumRawFormat", + ), + ("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"), + ("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"), +] +for name, des, ext, mode, class_name in lytro_formats: + config = PluginConfig( + name=name.upper(), + class_name=class_name, + module_name="imageio.plugins.lytro", + is_legacy=True, + install_name="lytro", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + }, + ) + known_plugins[config.name] = config + +# FreeImage plugin (legacy) +FREEIMAGE_FORMATS = [ + ( + "BMP", + 0, + "Windows or OS/2 Bitmap", + ".bmp", + "i", + "FreeimageBmpFormat", + "imageio.plugins.freeimage", + ), + ( + "CUT", + 21, + "Dr. 
Halo", + ".cut", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "DDS", + 24, + "DirectX Surface", + ".dds", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "EXR", + 29, + "ILM OpenEXR", + ".exr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "G3", + 27, + "Raw fax format CCITT G.3", + ".g3", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "GIF", + 25, + "Static and animated gif (FreeImage)", + ".gif", + "iI", + "GifFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "HDR", + 26, + "High Dynamic Range Image", + ".hdr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "ICO", + 1, + "Windows Icon", + ".ico", + "iI", + "IcoFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "IFF", + 5, + "IFF Interleaved Bitmap", + ".iff .lbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "J2K", + 30, + "JPEG-2000 codestream", + ".j2k .j2c", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JNG", + 3, + "JPEG Network Graphics", + ".jng", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JP2", + 31, + "JPEG-2000 File Format", + ".jp2", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG", + 2, + "JPEG - JFIF Compliant", + ".jpg .jif .jpeg .jpe", + "i", + "FreeimageJpegFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG-XR", + 36, + "JPEG XR image format", + ".jxr .wdp .hdp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "KOALA", + 4, + "C64 Koala Graphics", + ".koa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + # not registered in legacy pillow + # ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"), + ( + "PBM", + 7, + "Portable Bitmap (ASCII)", + ".pbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PBMRAW", + 8, + "Portable Bitmap (RAW)", + ".pbm", 
+ "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCD", + 9, + "Kodak PhotoCD", + ".pcd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCX", + 10, + "Zsoft Paintbrush", + ".pcx", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PFM", + 32, + "Portable floatmap", + ".pfm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGM", + 11, + "Portable Greymap (ASCII)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGMRAW", + 12, + "Portable Greymap (RAW)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PICT", + 33, + "Macintosh PICT", + ".pct .pict .pic", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PNG", + 13, + "Portable Network Graphics", + ".png", + "i", + "FreeimagePngFormat", + "imageio.plugins.freeimage", + ), + ( + "PPM", + 14, + "Portable Pixelmap (ASCII)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PPMRAW", + 15, + "Portable Pixelmap (RAW)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PSD", + 20, + "Adobe Photoshop", + ".psd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAS", + 16, + "Sun Raster Image", + ".ras", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAW", + 34, + "RAW camera image", + ".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 " + ".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf " + ".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "SGI", + 28, + "SGI Image Format", + ".sgi .rgb .rgba .bw", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TARGA", + 17, + "Truevision Targa", + ".tga .targa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TIFF", + 18, + 
"Tagged Image File Format", + ".tif .tiff", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WBMP", + 19, + "Wireless Bitmap", + ".wap .wbmp .wbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WebP", + 35, + "Google WebP image format", + ".webp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XBM", + 22, + "X11 Bitmap Format", + ".xbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XPM", + 23, + "X11 Pixmap Format", + ".xpm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), +] +for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS: + config = PluginConfig( + name=name.upper() + "-FI", + class_name=class_name, + module_name=module_name, + is_legacy=True, + install_name="freeimage", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + "fif": i, + }, + ) + known_plugins[config.name] = config + +# exists for backwards compatibility with FormatManager +# delete in V3 +_original_order = [x for x, config in known_plugins.items() if config.is_legacy] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/freeze.py b/evalkit_cambrian/lib/python3.10/site-packages/imageio/freeze.py new file mode 100644 index 0000000000000000000000000000000000000000..3753a29df665e416030b4eb0453ed3430a4c78fc --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/imageio/freeze.py @@ -0,0 +1,11 @@ +""" +Helper functions for freezing imageio. 
+""" + + +def get_includes(): + return ["email", "urllib.request", "numpy", "zipfile", "io"] + + +def get_excludes(): + return [] diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/plugins/grab.py b/evalkit_cambrian/lib/python3.10/site-packages/imageio/plugins/grab.py new file mode 100644 index 0000000000000000000000000000000000000000..8477863e30757740e83f55d880f2a7554dbe1521 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/imageio/plugins/grab.py @@ -0,0 +1,105 @@ +""" +PIL-based formats to take screenshots and grab from the clipboard. +""" + +import threading + +import numpy as np + +from ..core import Format + + +class BaseGrabFormat(Format): + """Base format for grab formats.""" + + _pillow_imported = False + _ImageGrab = None + + def __init__(self, *args, **kwargs): + super(BaseGrabFormat, self).__init__(*args, **kwargs) + self._lock = threading.RLock() + + def _can_write(self, request): + return False + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError("Imageio Pillow requires " "Pillow, not PIL!") + try: + from PIL import ImageGrab + except ImportError: + return None + self._ImageGrab = ImageGrab + return self._ImageGrab + + class Reader(Format.Reader): + def _open(self): + pass + + def _close(self): + pass + + def _get_data(self, index): + return self.format._get_data(index) + + +class ScreenGrabFormat(BaseGrabFormat): + """The ScreenGrabFormat provided a means to grab screenshots using + the uri of "". + + This functionality is provided via Pillow. Note that "" is + only supported on Windows and OS X. + + Parameters for reading + ---------------------- + No parameters. 
+ """ + + def _can_read(self, request): + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grab() + assert pil_im is not None + im = np.asarray(pil_im) + return im, {} + + +class ClipboardGrabFormat(BaseGrabFormat): + """The ClipboardGrabFormat provided a means to grab image data from + the clipboard, using the uri "" + + This functionality is provided via Pillow. Note that "" is + only supported on Windows. + + Parameters for reading + ---------------------- + No parameters. + """ + + def _can_read(self, request): + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grabclipboard() + if pil_im is None: + raise RuntimeError( + "There seems to be no image data on the " "clipboard now." + ) + im = np.asarray(pil_im) + return im, {} diff --git a/evalkit_cambrian/lib/python3.10/site-packages/imageio/py.typed b/evalkit_cambrian/lib/python3.10/site-packages/imageio/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_cambrian/lib/python3.10/site-packages/ninja-1.11.1.3.dist-info/licenses/LICENSE_Apache_20 b/evalkit_cambrian/lib/python3.10/site-packages/ninja-1.11.1.3.dist-info/licenses/LICENSE_Apache_20 new file mode 100644 index 0000000000000000000000000000000000000000..37ec93a14fdcd0d6e525d97c0cfa6b314eaa98d8 --- /dev/null +++ b/evalkit_cambrian/lib/python3.10/site-packages/ninja-1.11.1.3.dist-info/licenses/LICENSE_Apache_20 @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/__pycache__/cluster_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/__pycache__/cluster_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c89dd28d4f57b4daa79a83525ab94a5d21afa91 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/__pycache__/cluster_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0ca0db624a4ede4639663dcdec95adf52b1c775 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/compat.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..618309f101d185cbb6ffde0c271476e9ecd3b5b7 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/compat.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..302e4cd22e9f34f204bd0e695fa2360ca0f39a73 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..463e83372968848501751ad24f79d304402d173e Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fce1c7e4f9290270ddcb8a688e8f1bd3520a4027 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208bd28541f3027eb7fe8e0802d34d769f668bc8 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log_monitor.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log_monitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e859e6ade7c1ef8b041ca1c63b34f63690fed9b4 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log_monitor.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/logging_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/logging_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54a7c830e534dd0753371c29b7a51d2967acee1b Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/logging_utils.cpython-310.pyc differ diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/node.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/node.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5568134ff16fe91a6084ab4c701ea9664bd6789c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/node.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ab3d7aced3e349ee7c6029591f9ec32253f303a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deb2bf41507c5cb84784213e9793da310e5ce80a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/ray_experimental_perf.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/ray_experimental_perf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9841924f30ee5915bf53be483dac2448135c2c6c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/ray_experimental_perf.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b10f3623f4056fabde73c2903cc8f88187d8b0 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a1416a2ca572fdfff70130886db48fb0ba4eee2 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/tls_utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/tls_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d3b21103b7bc7dcd8ededf5e27013ac44fb0f56 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/tls_utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/utils.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72a45cfd84e2641a03303c3c3724d3dae560c25c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/utils.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/usage/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/usage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01441811d543ef37a19e0e8f686299e49a88f45d Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5883ae6c542ce2da3f04f9b3a58458af86e34abe --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/__init__.py @@ -0,0 +1,157 @@ +# Short term workaround for https://github.com/ray-project/ray/issues/32435 +# Dataset has a hard dependency on pandas, so it doesn't need to be delayed. 
+import pandas # noqa +from packaging.version import parse as parse_version + +from ray._private.utils import _get_pyarrow_version +from ray.data._internal.compute import ActorPoolStrategy +from ray.data._internal.datasource.tfrecords_datasource import TFXReadOptions +from ray.data._internal.execution.interfaces import ( + ExecutionOptions, + ExecutionResources, + NodeIdStr, +) +from ray.data._internal.logging import configure_logging +from ray.data.context import DataContext, DatasetContext +from ray.data.dataset import Dataset, Schema +from ray.data.datasource import ( + BlockBasedFileDatasink, + Datasink, + Datasource, + ReadTask, + RowBasedFileDatasink, +) +from ray.data.iterator import DataIterator, DatasetIterator +from ray.data.preprocessor import Preprocessor +from ray.data.read_api import ( # noqa: F401 + from_arrow, + from_arrow_refs, + from_blocks, + from_dask, + from_huggingface, + from_items, + from_mars, + from_modin, + from_numpy, + from_numpy_refs, + from_pandas, + from_pandas_refs, + from_spark, + from_tf, + from_torch, + range, + range_tensor, + read_avro, + read_bigquery, + read_binary_files, + read_csv, + read_databricks_tables, + read_datasource, + read_delta_sharing_tables, + read_hudi, + read_iceberg, + read_images, + read_json, + read_lance, + read_mongo, + read_numpy, + read_parquet, + read_parquet_bulk, + read_sql, + read_text, + read_tfrecords, + read_webdataset, +) + +# Module-level cached global functions for callable classes. It needs to be defined here +# since it has to be process-global across cloudpickled funcs. +_map_actor_context = None + +configure_logging() + +try: + import pyarrow as pa + + # https://github.com/apache/arrow/pull/38608 deprecated `PyExtensionType`, and + # disabled it's deserialization by default. To ensure that users can load data + # written with earlier version of Ray Data, we enable auto-loading of serialized + # tensor extensions. 
+ pyarrow_version = _get_pyarrow_version() + if not isinstance(pyarrow_version, str): + # PyArrow is mocked in documentation builds. In this case, we don't need to do + # anything. + pass + else: + from ray._private.ray_constants import env_bool + + RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE = env_bool( + "RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE", False + ) + + if ( + parse_version(pyarrow_version) >= parse_version("14.0.1") + and RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE + ): + pa.PyExtensionType.set_auto_load(True) + # Import these arrow extension types to ensure that they are registered. + from ray.air.util.tensor_extensions.arrow import ( # noqa + ArrowTensorType, + ArrowVariableShapedTensorType, + ) +except ModuleNotFoundError: + pass + + +__all__ = [ + "ActorPoolStrategy", + "BlockBasedFileDatasink", + "Dataset", + "DataContext", + "DatasetContext", # Backwards compatibility alias. + "DataIterator", + "DatasetIterator", # Backwards compatibility alias. + "Datasink", + "Datasource", + "ExecutionOptions", + "ExecutionResources", + "NodeIdStr", + "ReadTask", + "RowBasedFileDatasink", + "Schema", + "from_dask", + "from_items", + "from_arrow", + "from_arrow_refs", + "from_mars", + "from_modin", + "from_numpy", + "from_numpy_refs", + "from_pandas", + "from_pandas_refs", + "from_spark", + "from_tf", + "from_torch", + "from_huggingface", + "range", + "range_tensor", + "read_avro", + "read_text", + "read_binary_files", + "read_csv", + "read_datasource", + "read_delta_sharing_tables", + "read_hudi", + "read_iceberg", + "read_images", + "read_json", + "read_lance", + "read_numpy", + "read_mongo", + "read_parquet", + "read_parquet_bulk", + "read_sql", + "read_tfrecords", + "read_webdataset", + "Preprocessor", + "TFXReadOptions", +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/aggregate.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/aggregate.py new file mode 100644 index 0000000000000000000000000000000000000000..2d89636997f5e7060a2407622a59d72a0be8b9cc --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/aggregate.py @@ -0,0 +1,365 @@ +import math +from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union + +from ray.data._internal.null_aggregate import ( + _null_wrap_accumulate_block, + _null_wrap_accumulate_row, + _null_wrap_finalize, + _null_wrap_init, + _null_wrap_merge, +) +from ray.data._internal.planner.exchange.sort_task_spec import SortKey +from ray.data.aggregate import AggregateFn +from ray.data.block import AggType, Block, BlockAccessor + +if TYPE_CHECKING: + import pyarrow as pa + + +class _AggregateOnKeyBase(AggregateFn): + def _set_key_fn(self, on: str): + self._key_fn = on + + def _validate(self, schema: Optional[Union[type, "pa.lib.Schema"]]) -> None: + SortKey(self._key_fn).validate_schema(schema) + + +class Count(AggregateFn): + """Defines count aggregation.""" + + def __init__(self): + super().__init__( + init=lambda k: 0, + accumulate_block=( + lambda a, block: a + BlockAccessor.for_block(block).num_rows() + ), + merge=lambda a1, a2: a1 + a2, + name="count()", + ) + + +class Sum(_AggregateOnKeyBase): + """Defines sum aggregation.""" + + def __init__( + self, + on: Optional[str] = None, + ignore_nulls: bool = True, + alias_name: Optional[str] = None, + ): + self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"sum({str(on)})" + + null_merge = _null_wrap_merge(ignore_nulls, lambda a1, a2: a1 + a2) + + super().__init__( + init=_null_wrap_init(lambda k: 0), + merge=null_merge, + accumulate_block=_null_wrap_accumulate_block( + ignore_nulls, + lambda block: 
BlockAccessor.for_block(block).sum(on, ignore_nulls), + null_merge, + ), + finalize=_null_wrap_finalize(lambda a: a), + name=(self._rs_name), + ) + + +class Min(_AggregateOnKeyBase): + """Defines min aggregation.""" + + def __init__( + self, + on: Optional[str] = None, + ignore_nulls: bool = True, + alias_name: Optional[str] = None, + ): + self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"min({str(on)})" + + null_merge = _null_wrap_merge(ignore_nulls, min) + + super().__init__( + init=_null_wrap_init(lambda k: float("inf")), + merge=null_merge, + accumulate_block=_null_wrap_accumulate_block( + ignore_nulls, + lambda block: BlockAccessor.for_block(block).min(on, ignore_nulls), + null_merge, + ), + finalize=_null_wrap_finalize(lambda a: a), + name=(self._rs_name), + ) + + +class Max(_AggregateOnKeyBase): + """Defines max aggregation.""" + + def __init__( + self, + on: Optional[str] = None, + ignore_nulls: bool = True, + alias_name: Optional[str] = None, + ): + self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"max({str(on)})" + + null_merge = _null_wrap_merge(ignore_nulls, max) + + super().__init__( + init=_null_wrap_init(lambda k: float("-inf")), + merge=null_merge, + accumulate_block=_null_wrap_accumulate_block( + ignore_nulls, + lambda block: BlockAccessor.for_block(block).max(on, ignore_nulls), + null_merge, + ), + finalize=_null_wrap_finalize(lambda a: a), + name=(self._rs_name), + ) + + +class Mean(_AggregateOnKeyBase): + """Defines mean aggregation.""" + + def __init__( + self, + on: Optional[str] = None, + ignore_nulls: bool = True, + alias_name: Optional[str] = None, + ): + self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"mean({str(on)})" + + null_merge = _null_wrap_merge( + ignore_nulls, lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]] + ) + + def vectorized_mean(block: Block) -> AggType: + block_acc = 
BlockAccessor.for_block(block) + count = block_acc.count(on) + if count == 0 or count is None: + # Empty or all null. + return None + sum_ = block_acc.sum(on, ignore_nulls) + if sum_ is None: + # ignore_nulls=False and at least one null. + return None + return [sum_, count] + + super().__init__( + init=_null_wrap_init(lambda k: [0, 0]), + merge=null_merge, + accumulate_block=_null_wrap_accumulate_block( + ignore_nulls, + vectorized_mean, + null_merge, + ), + finalize=_null_wrap_finalize(lambda a: a[0] / a[1]), + name=(self._rs_name), + ) + + +class Std(_AggregateOnKeyBase): + """Defines standard deviation aggregation. + + Uses Welford's online method for an accumulator-style computation of the + standard deviation. This method was chosen due to its numerical + stability, and it being computable in a single pass. + This may give different (but more accurate) results than NumPy, Pandas, + and sklearn, which use a less numerically stable two-pass algorithm. + See + https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + """ + + def __init__( + self, + on: Optional[str] = None, + ddof: int = 1, + ignore_nulls: bool = True, + alias_name: Optional[str] = None, + ): + self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"std({str(on)})" + + def merge(a: List[float], b: List[float]): + # Merges two accumulations into one. + # See + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm + M2_a, mean_a, count_a = a + M2_b, mean_b, count_b = b + delta = mean_b - mean_a + count = count_a + count_b + # NOTE: We use this mean calculation since it's more numerically + # stable than mean_a + delta * count_b / count, which actually + # deviates from Pandas in the ~15th decimal place and causes our + # exact comparison tests to fail. + mean = (mean_a * count_a + mean_b * count_b) / count + # Update the sum of squared differences. 
            # Chan et al. parallel-variance merge: combine the two partial M2
            # values, correcting for the difference between the two means.
            M2 = M2_a + M2_b + (delta**2) * count_a * count_b / count
            return [M2, mean, count]

        null_merge = _null_wrap_merge(ignore_nulls, merge)

        def vectorized_std(block: Block) -> AggType:
            """Compute the [M2, mean, count] accumulator for a whole block at once."""
            block_acc = BlockAccessor.for_block(block)
            count = block_acc.count(on)
            if count == 0 or count is None:
                # Empty or all null.
                return None
            sum_ = block_acc.sum(on, ignore_nulls)
            if sum_ is None:
                # ignore_nulls=False and at least one null.
                return None
            mean = sum_ / count
            M2 = block_acc.sum_of_squared_diffs_from_mean(on, ignore_nulls, mean)
            return [M2, mean, count]

        def finalize(a: List[float]) -> float:
            # Compute the final standard deviation from the accumulated
            # sum of squared differences from current mean and the count.
            M2, mean, count = a
            if count < 2:
                # A single sample has no spread; avoid division by (count - ddof).
                return 0.0
            return math.sqrt(M2 / (count - ddof))

        super().__init__(
            init=_null_wrap_init(lambda k: [0, 0, 0]),
            merge=null_merge,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls,
                vectorized_std,
                null_merge,
            ),
            finalize=_null_wrap_finalize(finalize),
            name=(self._rs_name),
        )


class AbsMax(_AggregateOnKeyBase):
    """Defines absolute max aggregation.

    Accumulates ``max(acc, abs(value))`` over the target column; the running
    accumulator starts at 0, which is a valid identity since abs() is >= 0.
    """

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        on_fn = _to_on_fn(on)
        # Use the caller-provided alias for the output column, if any.
        if alias_name:
            self._rs_name = alias_name
        else:
            self._rs_name = f"abs_max({str(on)})"

        super().__init__(
            init=_null_wrap_init(lambda k: 0),
            merge=_null_wrap_merge(ignore_nulls, max),
            accumulate_row=_null_wrap_accumulate_row(
                ignore_nulls, on_fn, lambda a, r: max(a, abs(r))
            ),
            finalize=_null_wrap_finalize(lambda a: a),
            name=(self._rs_name),
        )


def _to_on_fn(on: Optional[str]):
    """Normalize an ``on`` spec into a row->value extractor.

    ``None`` means "use the whole row", a string means "index the row by that
    column name", and anything else is assumed to already be a callable.
    """
    if on is None:
        return lambda r: r
    elif isinstance(on, str):
        return lambda r: r[on]
    else:
        return on


class Quantile(_AggregateOnKeyBase):
    """Defines Quantile aggregation.

    Collects all values of the target column into a list and computes the
    q-th quantile (linear interpolation between closest ranks) at finalize.
    """

    def __init__(
        self,
        on: Optional[str] = None,
        q: float = 0.5,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        # Quantile in [0, 1]; 0.5 is the median.
        self._q = q
        if alias_name:
            self._rs_name = alias_name
        else:
            self._rs_name = f"quantile({str(on)})"

        def merge(a: List[int], b: List[int]):
            # Either side may be a list accumulator or a bare scalar value;
            # normalize to a single list. Empty strings and None are dropped.
            if isinstance(a, List) and isinstance(b, List):
                a.extend(b)
                return a
            if isinstance(a, List) and (not isinstance(b, List)):
                if b is not None and b != "":
                    a.append(b)
                return a
            if isinstance(b, List) and (not isinstance(a, List)):
                if a is not None and a != "":
                    b.append(a)
                return b

            ls = []
            if a is not None and a != "":
                ls.append(a)
            if b is not None and b != "":
                ls.append(b)
            return ls

        null_merge = _null_wrap_merge(ignore_nulls, merge)

        def block_row_ls(block: Block) -> AggType:
            """Collect the raw column values of a block into a list."""
            block_acc = BlockAccessor.for_block(block)
            ls = []
            for row in block_acc.iter_rows(public_row_format=False):
                ls.append(row.get(on))
            return ls

        import math

        def percentile(input_values, key: Optional[Callable[[Any], Any]] = None):
            """Linear-interpolated quantile of the accumulated values."""
            if not input_values:
                return None

            if key is None:
                key = lambda x: x  # noqa: E731

            # NOTE(review): the sort happens on the raw values while `key` is
            # only applied afterwards to the selected elements — confirm that
            # callers never pass a `key` that changes ordering.
            input_values = sorted(input_values)
            k = (len(input_values) - 1) * self._q
            f = math.floor(k)
            c = math.ceil(k)
            if f == c:
                # Exact rank: no interpolation needed.
                return key(input_values[int(k)])
            d0 = key(input_values[int(f)]) * (c - k)
            d1 = key(input_values[int(c)]) * (k - f)
            return round(d0 + d1, 5)

        super().__init__(
            # NOTE(review): the init accumulator is [0]; it looks like this 0
            # could leak into the collected values via merge() — verify that
            # _null_wrap_accumulate_block discards the init value.
            init=_null_wrap_init(lambda k: [0]),
            merge=null_merge,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls,
                block_row_ls,
                null_merge,
            ),
            finalize=_null_wrap_finalize(percentile),
            name=(self._rs_name),
        )
diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/arrow_block.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/arrow_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..1473b8fb6e3b28db776ca9c0b991edbfd516dfaa
--- /dev/null
+++
b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/arrow_block.py @@ -0,0 +1,650 @@ +import collections +import heapq +import logging +import random +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +from ray._private.utils import _get_pyarrow_version +from ray.air.constants import TENSOR_COLUMN_NAME +from ray.air.util.tensor_extensions.arrow import ( + convert_to_pyarrow_array, + pyarrow_table_from_pydict, +) +from ray.data._internal.arrow_ops import transform_polars, transform_pyarrow +from ray.data._internal.numpy_support import convert_to_numpy +from ray.data._internal.row import TableRow +from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder +from ray.data._internal.util import NULL_SENTINEL, find_partitions +from ray.data.block import ( + Block, + BlockAccessor, + BlockExecStats, + BlockMetadata, + BlockType, + KeyType, + U, +) +from ray.data.context import DataContext + +try: + import pyarrow +except ImportError: + pyarrow = None + + +if TYPE_CHECKING: + import pandas + + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + from ray.data.aggregate import AggregateFn + + +T = TypeVar("T") +logger = logging.getLogger(__name__) + + +# We offload some transformations to polars for performance. +def get_sort_transform(context: DataContext) -> Callable: + if context.use_polars: + return transform_polars.sort + else: + return transform_pyarrow.sort + + +def get_concat_and_sort_transform(context: DataContext) -> Callable: + if context.use_polars: + return transform_polars.concat_and_sort + else: + return transform_pyarrow.concat_and_sort + + +class ArrowRow(TableRow): + """ + Row of a tabular Dataset backed by a Arrow Table block. 
+ """ + + def __getitem__(self, key: Union[str, List[str]]) -> Any: + from ray.data.extensions import get_arrow_extension_tensor_types + + tensor_arrow_extension_types = get_arrow_extension_tensor_types() + + def get_item(keys: List[str]) -> Any: + schema = self._row.schema + if isinstance(schema.field(keys[0]).type, tensor_arrow_extension_types): + # Build a tensor row. + return tuple( + [ + ArrowBlockAccessor._build_tensor_row(self._row, col_name=key) + for key in keys + ] + ) + + table = self._row.select(keys) + if len(table) == 0: + return None + + items = [col[0] for col in table.columns] + try: + # Try to interpret this as a pyarrow.Scalar value. + return tuple([item.as_py() for item in items]) + + except AttributeError: + # Assume that this row is an element of an extension array, and + # that it is bypassing pyarrow's scalar model for Arrow < 8.0.0. + return items + + is_single_item = isinstance(key, str) + keys = [key] if is_single_item else key + + items = get_item(keys) + + if items is None: + return None + elif is_single_item: + return items[0] + else: + return items + + def __iter__(self) -> Iterator: + for k in self._row.column_names: + yield k + + def __len__(self): + return self._row.num_columns + + +class ArrowBlockBuilder(TableBlockBuilder): + def __init__(self): + if pyarrow is None: + raise ImportError("Run `pip install pyarrow` for Arrow support") + super().__init__((pyarrow.Table, bytes)) + + @staticmethod + def _table_from_pydict(columns: Dict[str, List[Any]]) -> Block: + pa_cols: Dict[str, pyarrow.Array] = dict() + + for col_name, col_vals in columns.items(): + np_col_vals = convert_to_numpy(col_vals) + + pa_cols[col_name] = convert_to_pyarrow_array(np_col_vals, col_name) + + return pyarrow_table_from_pydict(pa_cols) + + @staticmethod + def _concat_tables(tables: List[Block]) -> Block: + return transform_pyarrow.concat(tables) + + @staticmethod + def _concat_would_copy() -> bool: + return False + + @staticmethod + def _empty_table() -> 
"pyarrow.Table":
        return pyarrow_table_from_pydict({})

    def block_type(self) -> BlockType:
        return BlockType.ARROW


class ArrowBlockAccessor(TableBlockAccessor):
    """BlockAccessor implementation backed by a ``pyarrow.Table``."""

    # Row wrapper type returned by row iteration.
    ROW_TYPE = ArrowRow

    def __init__(self, table: "pyarrow.Table"):
        if pyarrow is None:
            raise ImportError("Run `pip install pyarrow` for Arrow support")
        super().__init__(table)

    def column_names(self) -> List[str]:
        """Return the table's column names."""
        return self._table.column_names

    def append_column(self, name: str, data: Any) -> Block:
        """Return a new table with ``data`` appended as column ``name``."""
        assert name not in self._table.column_names

        if any(isinstance(item, np.ndarray) for item in data):
            raise NotImplementedError(
                f"`{self.__class__.__name__}.append_column()` doesn't support "
                "array-like data."
            )

        return self._table.append_column(name, [data])

    @classmethod
    def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor":
        """Deserialize an Arrow IPC stream into an accessor."""
        reader = pyarrow.ipc.open_stream(data)
        return cls(reader.read_all())

    @staticmethod
    def _build_tensor_row(
        row: ArrowRow, col_name: str = TENSOR_COLUMN_NAME
    ) -> np.ndarray:
        """Extract a tensor-extension column element as an ndarray, handling
        the per-Arrow-version differences in extension scalar behavior."""
        from packaging.version import parse as parse_version

        element = row[col_name][0]
        # TODO(Clark): Reduce this to np.asarray(element) once we only support Arrow
        # 9.0.0+.
        pyarrow_version = _get_pyarrow_version()
        if pyarrow_version is not None:
            pyarrow_version = parse_version(pyarrow_version)
        if pyarrow_version is None or pyarrow_version >= parse_version("8.0.0"):
            assert isinstance(element, pyarrow.ExtensionScalar)
            if pyarrow_version is None or pyarrow_version >= parse_version("9.0.0"):
                # For Arrow 9.0.0+, accessing an element in a chunked tensor array
                # produces an ArrowTensorScalar, which we convert to an ndarray using
                # .as_py().
                element = element.as_py()
            else:
                # For Arrow 8.*, accessing an element in a chunked tensor array produces
                # an ExtensionScalar, which we convert to an ndarray using our custom
                # method.
                element = element.type._extension_scalar_to_ndarray(element)
        # For Arrow < 8.0.0, accessing an element in a chunked tensor array produces an
        # ndarray, which we return directly.
        assert isinstance(element, np.ndarray), type(element)
        return element

    def slice(self, start: int, end: int, copy: bool = False) -> "pyarrow.Table":
        """Return rows [start, end); zero-copy view unless ``copy`` is True."""
        view = self._table.slice(start, end - start)
        if copy:
            view = transform_pyarrow.combine_chunks(view)
        return view

    def random_shuffle(self, random_seed: Optional[int]) -> "pyarrow.Table":
        """Return a row-shuffled copy of the table (seeded if given)."""
        # TODO(swang): Creating this np.array index can add a lot of memory
        # pressure when there are a large number of small rows. Investigate
        # random shuffling in place to reduce memory pressure.
        # See https://github.com/ray-project/ray/issues/42146.
        random = np.random.RandomState(random_seed)
        return self.take(random.permutation(self.num_rows()))

    def schema(self) -> "pyarrow.lib.Schema":
        return self._table.schema

    def to_pandas(self) -> "pandas.DataFrame":
        """Convert to a pandas DataFrame, optionally casting tensor-extension
        columns to plain ndarrays per the DataContext setting."""
        from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays

        df = self._table.to_pandas()
        ctx = DataContext.get_current()
        if ctx.enable_tensor_extension_casting:
            df = _cast_tensor_columns_to_ndarrays(df)
        return df

    def to_numpy(
        self, columns: Optional[Union[str, List[str]]] = None
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Convert column(s) to numpy: a single ndarray when ``columns`` is a
        string, otherwise a dict of column name -> ndarray."""
        if columns is None:
            columns = self._table.column_names
            should_be_single_ndarray = False
        elif isinstance(columns, list):
            should_be_single_ndarray = False
        else:
            columns = [columns]
            should_be_single_ndarray = True

        column_names_set = set(self._table.column_names)
        for column in columns:
            if column not in column_names_set:
                raise ValueError(
                    f"Cannot find column {column}, available columns: "
                    f"{column_names_set}"
                )

        column_values_ndarrays = []

        for col_name in columns:
            col = self._table[col_name]

            # Combine columnar values arrays to make these contiguous
            # (making them compatible with numpy format)
            combined_array = transform_pyarrow.combine_chunked_array(col)

            column_values_ndarrays.append(
                transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
            )

        if should_be_single_ndarray:
            assert len(columns) == 1
            return column_values_ndarrays[0]
        else:
            return dict(zip(columns, column_values_ndarrays))

    def to_arrow(self) -> "pyarrow.Table":
        return self._table

    def num_rows(self) -> int:
        # Arrow may represent an empty table via an N > 0 row, 0-column table, e.g. when
        # slicing an empty table, so we return 0 if num_columns == 0.
        return self._table.num_rows if self._table.num_columns > 0 else 0

    def size_bytes(self) -> int:
        return self._table.nbytes

    def _zip(self, acc: BlockAccessor) -> "Block":
        """Column-wise zip of this block with another, deduplicating names."""
        r = self.to_arrow()
        s = acc.to_arrow()
        for col_name in s.column_names:
            col = s.column(col_name)
            # Ensure the column names are unique after zip.
            if col_name in r.column_names:
                i = 1
                new_name = col_name
                while new_name in r.column_names:
                    new_name = "{}_{}".format(col_name, i)
                    i += 1
                col_name = new_name
            r = r.append_column(col_name, col)
        return r

    @staticmethod
    def builder() -> ArrowBlockBuilder:
        return ArrowBlockBuilder()

    @staticmethod
    def _empty_table() -> "pyarrow.Table":
        return ArrowBlockBuilder._empty_table()

    def take(
        self,
        indices: Union[List[int], "pyarrow.Array", "pyarrow.ChunkedArray"],
    ) -> "pyarrow.Table":
        """Select rows from the underlying table.

        This method is an alternative to pyarrow.Table.take(), which breaks for
        extension arrays.
        """
        return transform_pyarrow.take_table(self._table, indices)

    def select(self, columns: List[str]) -> "pyarrow.Table":
        """Project the table down to the given columns (names only)."""
        if not all(isinstance(col, str) for col in columns):
            raise ValueError(
                "Columns must be a list of column name strings when aggregating on "
                f"Arrow blocks, but got: {columns}."
            )
        return self._table.select(columns)

    def _sample(self, n_samples: int, sort_key: "SortKey") -> "pyarrow.Table":
        """Return ``n_samples`` rows sampled (without replacement) from the
        sort-key columns only."""
        indices = random.sample(range(self._table.num_rows), n_samples)
        table = self._table.select(sort_key.get_columns())
        return transform_pyarrow.take_table(table, indices)

    def count(self, on: str) -> Optional[U]:
        """Count the number of non-null values in the provided column."""
        import pyarrow.compute as pac

        if not isinstance(on, str):
            raise ValueError(
                "on must be a string when aggregating on Arrow blocks, but got:"
                f"{type(on)}."
            )

        if self.num_rows() == 0:
            return None

        col = self._table[on]
        return pac.count(col).as_py()

    def _apply_arrow_compute(
        self, compute_fn: Callable, on: str, ignore_nulls: bool
    ) -> Optional[U]:
        """Helper providing null handling around applying an aggregation to a column."""
        import pyarrow as pa

        if not isinstance(on, str):
            raise ValueError(
                "on must be a string when aggregating on Arrow blocks, but got:"
                f"{type(on)}."
            )

        if self.num_rows() == 0:
            return None

        col = self._table[on]
        if pa.types.is_null(col.type):
            # An all-null-typed column has no aggregatable values.
            return None
        else:
            return compute_fn(col, skip_nulls=ignore_nulls).as_py()

    def sum(self, on: str, ignore_nulls: bool) -> Optional[U]:
        """Sum of the column; None for empty/all-null input."""
        import pyarrow.compute as pac

        return self._apply_arrow_compute(pac.sum, on, ignore_nulls)

    def min(self, on: str, ignore_nulls: bool) -> Optional[U]:
        """Minimum of the column; None for empty/all-null input."""
        import pyarrow.compute as pac

        return self._apply_arrow_compute(pac.min, on, ignore_nulls)

    def max(self, on: str, ignore_nulls: bool) -> Optional[U]:
        """Maximum of the column; None for empty/all-null input."""
        import pyarrow.compute as pac

        return self._apply_arrow_compute(pac.max, on, ignore_nulls)

    def mean(self, on: str, ignore_nulls: bool) -> Optional[U]:
        """Mean of the column; None for empty/all-null input."""
        import pyarrow.compute as pac

        return self._apply_arrow_compute(pac.mean, on, ignore_nulls)

    def sum_of_squared_diffs_from_mean(
        self,
        on: str,
        ignore_nulls: bool,
        mean: Optional[U] = None,
    ) -> Optional[U]:
        """Sum of squared deviations from ``mean`` (computed if not given)."""
        import pyarrow.compute as pac

        if mean is None:
            # If precomputed mean not given, we compute it ourselves.
            mean = self.mean(on, ignore_nulls)
            if mean is None:
                return None
        return self._apply_arrow_compute(
            lambda col, skip_nulls: pac.sum(
                pac.power(pac.subtract(col, mean), 2),
                skip_nulls=skip_nulls,
            ),
            on,
            ignore_nulls,
        )

    def sort_and_partition(
        self, boundaries: List[T], sort_key: "SortKey"
    ) -> List["Block"]:
        """Sort the block by ``sort_key`` and split it at ``boundaries``."""
        if self._table.num_rows == 0:
            # If the pyarrow table is empty we may not have schema
            # so calling sort_indices() will raise an error.
            return [self._empty_table() for _ in range(len(boundaries) + 1)]

        context = DataContext.get_current()
        sort = get_sort_transform(context)

        table = sort(self._table, sort_key)
        if len(boundaries) == 0:
            return [table]
        return find_partitions(table, boundaries, sort_key)

    def combine(self, sort_key: "SortKey", aggs: Tuple["AggregateFn"]) -> Block:
        """Combine rows with the same key into an accumulator.

        This assumes the block is already sorted by key in ascending order.

        Args:
            sort_key: A column name or list of column names.
                If this is ``None``, place all rows in a single group.

            aggs: The aggregations to do.

        Returns:
            A sorted block of [k, v_1, ..., v_n] columns where k is the groupby
            key and v_i is the partially combined accumulator for the ith given
            aggregation.
            If key is None then the k column is omitted.
        """
        keys: List[str] = sort_key.get_columns()

        def iter_groups() -> Iterator[Tuple[Sequence[KeyType], Block]]:
            """Creates an iterator over zero-copy group views."""
            if not keys:
                # Global aggregation consists of a single "group", so we short-circuit.
                yield tuple(), self.to_block()
                return

            start = end = 0
            iter = self.iter_rows(public_row_format=False)
            next_row = None
            while True:
                try:
                    if next_row is None:
                        next_row = next(iter)
                    next_keys = next_row[keys]
                    # Advance `end` over the run of rows sharing this key.
                    while next_row[keys] == next_keys:
                        end += 1
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            next_row = None
                            break
                    yield next_keys, self.slice(start, end)
                    start = end
                except StopIteration:
                    break

        builder = ArrowBlockBuilder()
        for group_keys, group_view in iter_groups():
            # Aggregate.
            init_vals = group_keys
            if len(group_keys) == 1:
                init_vals = group_keys[0]

            accumulators = [agg.init(init_vals) for agg in aggs]
            for i in range(len(aggs)):
                accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view)

            # Build the row.
            row = {}
            if keys:
                for k, gk in zip(keys, group_keys):
                    row[k] = gk

            count = collections.defaultdict(int)
            for agg, accumulator in zip(aggs, accumulators):
                name = agg.name
                # Check for conflicts with existing aggregation name.
                if count[name] > 0:
                    # Disambiguate duplicate aggregation names, e.g. two sum()s.
                    name = self._munge_conflict(name, count[name])
                count[name] += 1
                row[name] = accumulator

            builder.add(row)

        return builder.build()

    @staticmethod
    def _munge_conflict(name, count):
        """Return a de-conflicted column name, e.g. "sum(x)" -> "sum(x)_2"."""
        return f"{name}_{count+1}"

    @staticmethod
    def merge_sorted_blocks(
        blocks: List[Block], sort_key: "SortKey"
    ) -> Tuple[Block, BlockMetadata]:
        """Merge already-sorted blocks into a single sorted block + metadata."""
        stats = BlockExecStats.builder()
        # Drop empty blocks; they contribute nothing and may lack a schema.
        blocks = [b for b in blocks if b.num_rows > 0]
        if len(blocks) == 0:
            ret = ArrowBlockAccessor._empty_table()
        else:
            # Handle blocks of different types.
            blocks = TableBlockAccessor.normalize_block_types(blocks, "arrow")
            concat_and_sort = get_concat_and_sort_transform(DataContext.get_current())
            ret = concat_and_sort(blocks, sort_key)
        return ret, ArrowBlockAccessor(ret).get_metadata(exec_stats=stats.build())

    @staticmethod
    def aggregate_combined_blocks(
        blocks: List[Block],
        sort_key: "SortKey",
        aggs: Tuple["AggregateFn"],
        finalize: bool,
    ) -> Tuple[Block, BlockMetadata]:
        """Aggregate sorted, partially combined blocks with the same key range.

        This assumes blocks are already sorted by key in ascending order,
        so we can do merge sort to get all the rows with the same key.

        Args:
            blocks: A list of partially combined and sorted blocks.
            sort_key: The column name of key or None for global aggregation.
            aggs: The aggregations to do.
            finalize: Whether to finalize the aggregation. This is used as an
                optimization for cases where we repeatedly combine partially
                aggregated groups.

        Returns:
            A block of [k, v_1, ..., v_n] columns and its metadata where k is
            the groupby key and v_i is the corresponding aggregation result for
            the ith given aggregation.
            If key is None then the k column is omitted.
        """

        stats = BlockExecStats.builder()
        keys = sort_key.get_columns()

        def key_fn(r):
            # Global aggregation uses a constant key so all rows form one group.
            if keys:
                return tuple(r[keys])
            else:
                return (0,)

        # Replace Nones with NULL_SENTINEL to ensure safe sorting.
        def key_fn_with_null_sentinel(r):
            values = key_fn(r)
            return [NULL_SENTINEL if v is None else v for v in values]

        # Handle blocks of different types.
        blocks = TableBlockAccessor.normalize_block_types(blocks, "arrow")

        # k-way merge of the sorted per-block row iterators.
        iter = heapq.merge(
            *[
                ArrowBlockAccessor(block).iter_rows(public_row_format=False)
                for block in blocks
            ],
            key=key_fn_with_null_sentinel,
        )
        next_row = None
        builder = ArrowBlockBuilder()
        while True:
            try:
                if next_row is None:
                    next_row = next(iter)
                next_keys = key_fn(next_row)
                next_key_columns = keys

                def gen():
                    # Yield the run of merged rows sharing the current key.
                    nonlocal iter
                    nonlocal next_row
                    while key_fn(next_row) == next_keys:
                        yield next_row
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            next_row = None
                            break

                # Merge.
                first = True
                accumulators = [None] * len(aggs)
                resolved_agg_names = [None] * len(aggs)
                for r in gen():
                    if first:
                        # Resolve output column names once per group, applying
                        # the same conflict munging as combine().
                        count = collections.defaultdict(int)
                        for i in range(len(aggs)):
                            name = aggs[i].name
                            # Check for conflicts with existing aggregation
                            # name.
                            if count[name] > 0:
                                name = ArrowBlockAccessor._munge_conflict(
                                    name, count[name]
                                )
                            count[name] += 1
                            resolved_agg_names[i] = name
                            accumulators[i] = r[name]
                        first = False
                    else:
                        for i in range(len(aggs)):
                            accumulators[i] = aggs[i].merge(
                                accumulators[i], r[resolved_agg_names[i]]
                            )
                # Build the row.
                row = {}
                if keys:
                    for col_name, next_key in zip(next_key_columns, next_keys):
                        row[col_name] = next_key

                for agg, agg_name, accumulator in zip(
                    aggs, resolved_agg_names, accumulators
                ):
                    if finalize:
                        row[agg_name] = agg.finalize(accumulator)
                    else:
                        row[agg_name] = accumulator

                builder.add(row)
            except StopIteration:
                break

        ret = builder.build()
        return ret, ArrowBlockAccessor(ret).get_metadata(exec_stats=stats.build())

    def block_type(self) -> BlockType:
        return BlockType.ARROW
diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/batcher.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/batcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..d27ed089f03fb10b1c4ce422a540b08e0b7cf4e1
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/batcher.py
@@ -0,0 +1,325 @@
from typing import Optional

from ray.data._internal.arrow_block import ArrowBlockAccessor
from ray.data._internal.arrow_ops import transform_pyarrow
from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
from ray.data.block import Block, BlockAccessor

# pyarrow.Table.slice is slow when the table has many chunks
# so we combine chunks into a single one to make slice faster
# with the cost of an extra copy.
# See https://github.com/ray-project/ray/issues/31108 for more details.
# TODO(jjyao): remove this once
# https://github.com/apache/arrow/issues/35126 is resolved.
MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS = 10

# Delay compaction until the shuffle buffer has reached this ratio over the min
# shuffle buffer size. Setting this to 1 minimizes memory usage, at the cost of
# frequent compactions. Setting this to higher values increases memory usage but
# reduces compaction frequency.
SHUFFLE_BUFFER_COMPACTION_RATIO = 1.5


class BatcherInterface:
    """Abstract interface for turning a stream of blocks into batches."""

    def add(self, block: Block):
        """Add a block to the block buffer.

        Args:
            block: Block to add to the block buffer.
        """
        raise NotImplementedError()

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the buffer."""
        raise NotImplementedError()

    def has_batch(self) -> bool:
        """Whether this Batcher has any full batches."""
        raise NotImplementedError()

    def has_any(self) -> bool:
        """Whether this Batcher has any data."""
        raise NotImplementedError()

    def next_batch(self) -> Block:
        """Get the next batch from the block buffer.

        Returns:
            A batch represented as a Block.
        """
        raise NotImplementedError()


class Batcher(BatcherInterface):
    """Chunks blocks into batches."""

    # Implementation Note: When there are multiple batches per block, this batcher will
    # slice off and return each batch and add the remaining block back to the buffer
    # instead of optimally slicing and returning all batches from the block at once.
    # This will result in extra (and nested) block slicing. However, since slices are
    # zero-copy views, we sacrifice what should be a small performance hit for better
    # readability.

    def __init__(self, batch_size: Optional[int], ensure_copy: bool = False):
        """
        Construct a batcher that yields batches of batch_sizes rows.

        Args:
            batch_size: The size of batches to yield.
            ensure_copy: Whether batches are always copied from the underlying base
                blocks (not zero-copy views).
        """
        self._batch_size = batch_size
        # Pending blocks, in insertion order; consumed from the front.
        self._buffer = []
        # Total number of rows currently buffered.
        self._buffer_size = 0
        self._done_adding = False
        self._ensure_copy = ensure_copy

    def add(self, block: Block):
        """Add a block to the block buffer.

        Note empty block is not added to buffer.

        Args:
            block: Block to add to the block buffer.
        """
        if BlockAccessor.for_block(block).num_rows() > 0:
            self._buffer.append(block)
            self._buffer_size += BlockAccessor.for_block(block).num_rows()

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the batcher."""
        self._done_adding = True

    def has_batch(self) -> bool:
        """Whether this Batcher has any full batches."""
        return self.has_any() and (
            self._batch_size is None or self._buffer_size >= self._batch_size
        )

    def has_any(self) -> bool:
        """Whether this Batcher has any data."""
        return self._buffer_size > 0

    def next_batch(self) -> Block:
        """Get the next batch from the block buffer.

        Returns:
            A batch represented as a Block.
        """
        assert self.has_batch() or (self._done_adding and self.has_any())
        needs_copy = self._ensure_copy
        # If no batch size, short-circuit.
        if self._batch_size is None:
            assert len(self._buffer) == 1
            block = self._buffer[0]
            if needs_copy:
                # Copy block if needing to ensure fresh batch copy.
                block = BlockAccessor.for_block(block)
                block = block.slice(0, block.num_rows(), copy=True)
            self._buffer = []
            self._buffer_size = 0
            return block
        output = DelegatingBlockBuilder()
        leftover = []
        needed = self._batch_size
        for block in self._buffer:
            accessor = BlockAccessor.for_block(block)
            if needed <= 0:
                # We already have a full batch, so add this block to
                # the leftovers.
                leftover.append(block)
            elif accessor.num_rows() <= needed:
                # Whole block fits in the batch.
                output.add_block(accessor.to_block())
                needed -= accessor.num_rows()
            else:
                if (
                    isinstance(accessor, ArrowBlockAccessor)
                    and block.num_columns > 0
                    and block.column(0).num_chunks
                    >= MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS
                ):
                    # Combine chunks first so the slices below are fast; see
                    # MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS above.
                    accessor = BlockAccessor.for_block(
                        transform_pyarrow.combine_chunks(block)
                    )
                # We only need part of the block to fill out a batch.
                output.add_block(accessor.slice(0, needed, copy=False))
                # Add the rest of the block to the leftovers.
                leftover.append(accessor.slice(needed, accessor.num_rows(), copy=False))
                needed = 0

        # Move the leftovers into the block buffer so they're the first
        # blocks consumed on the next batch extraction.
        self._buffer = leftover
        self._buffer_size -= self._batch_size
        needs_copy = needs_copy and not output.will_build_yield_copy()
        batch = output.build()
        if needs_copy:
            # Need to ensure that the batch is a fresh copy.
            batch = BlockAccessor.for_block(batch)
            batch = batch.slice(0, batch.num_rows(), copy=True)
        return batch


class ShufflingBatcher(BatcherInterface):
    """Chunks blocks into shuffled batches, using a local in-memory shuffle buffer."""

    # Implementation Note:
    #
    # This shuffling batcher lazily builds a shuffle buffer from added blocks, and once
    # a batch is requested via .next_batch(), it concatenates the blocks into a concrete
    # shuffle buffer and randomly shuffles the entire buffer.
    #
    # Adding of more blocks can be intermixed with retrieving batches, but it should be
    # noted that we can end up performing two expensive operations on each retrieval:
    #  1. Build added blocks into a concrete shuffle buffer.
    #  2. Shuffling the entire buffer.
    # To amortize the overhead of this process, we only shuffle the blocks after a
    # delay designated by SHUFFLE_BUFFER_COMPACTION_RATIO.
    #
    # Similarly, adding blocks is very cheap. Each added block will be appended to a
    # list, with concatenation of the underlying data delayed until the next batch
    # compaction.

    def __init__(
        self,
        batch_size: Optional[int],
        shuffle_buffer_min_size: int,
        shuffle_seed: Optional[int] = None,
    ):
        """Constructs a random-shuffling block batcher.

        Args:
            batch_size: Record batch size.
            shuffle_buffer_min_size: Minimum number of rows that must be in the local
                in-memory shuffle buffer in order to yield a batch.
                When there are no
                more rows to be added to the buffer, the number of rows in the buffer
                *will* decrease below this value while yielding the remaining batches,
                and the final batch may have less than ``batch_size`` rows. Increasing
                this will improve the randomness of the shuffle but may increase the
                latency to the first batch.
            shuffle_seed: The seed to use for the local random shuffle.
        """
        if batch_size is None:
            raise ValueError("Must specify a batch_size if using a local shuffle.")
        self._batch_size = batch_size
        self._shuffle_seed = shuffle_seed
        if shuffle_buffer_min_size < batch_size:
            # Round it up internally to `batch_size` since our algorithm requires it.
            # This is harmless since it only offers extra randomization.
            shuffle_buffer_min_size = batch_size
        self._buffer_min_size = shuffle_buffer_min_size
        # Pending (not yet compacted/shuffled) blocks.
        self._builder = DelegatingBlockBuilder()
        # The materialized, already-shuffled buffer (None until first compaction).
        self._shuffle_buffer: Block = None
        # Index of the next unyielded row within the materialized buffer.
        self._batch_head = 0
        self._done_adding = False

    def add(self, block: Block):
        """Add a block to the shuffle buffer.

        Note empty block is not added to buffer.

        Args:
            block: Block to add to the shuffle buffer.
        """
        if BlockAccessor.for_block(block).num_rows() > 0:
            self._builder.add_block(block)

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the batcher.

        No more blocks should be added to the batcher after calling this.
        """
        self._done_adding = True

    def has_any(self) -> bool:
        """Whether this batcher has any data."""
        return self._buffer_size() > 0

    def has_batch(self) -> bool:
        """Whether this batcher has any batches."""
        buffer_size = self._buffer_size()

        if not self._done_adding:
            # Delay pulling of batches until the buffer is large enough in order to
            # amortize compaction overhead.
            return self._materialized_buffer_size() >= self._buffer_min_size or (
                buffer_size - self._batch_size
                >= self._buffer_min_size * SHUFFLE_BUFFER_COMPACTION_RATIO
            )
        else:
            return buffer_size >= self._batch_size

    def _buffer_size(self) -> int:
        """Return shuffle buffer size."""
        buffer_size = self._builder.num_rows()
        buffer_size += self._materialized_buffer_size()
        return buffer_size

    def _materialized_buffer_size(self) -> int:
        """Return materialized (compacted portion of) shuffle buffer size."""
        if self._shuffle_buffer is None:
            return 0
        # The size of the concrete (materialized) shuffle buffer, adjusting
        # for the batch head position, which also serves as a counter of the number
        # of already-yielded rows from the current concrete shuffle buffer.
        return max(
            0,
            BlockAccessor.for_block(self._shuffle_buffer).num_rows() - self._batch_head,
        )

    def next_batch(self) -> Block:
        """Get the next shuffled batch from the shuffle buffer.

        Returns:
            A batch represented as a Block.
        """
        assert self.has_batch() or (self._done_adding and self.has_any())
        # Add rows in the builder to the shuffle buffer. Note that we delay compaction
        # as much as possible to amortize the concatenation overhead. Compaction is
        # only necessary when the materialized buffer size falls below the min size.
        if self._builder.num_rows() > 0 and (
            self._done_adding
            or self._materialized_buffer_size() <= self._buffer_min_size
        ):
            if self._shuffle_buffer is not None:
                if self._batch_head > 0:
                    # Compact the materialized shuffle buffer.
                    block = BlockAccessor.for_block(self._shuffle_buffer)
                    self._shuffle_buffer = block.slice(
                        self._batch_head, block.num_rows()
                    )
                # Add the unyielded rows from the existing shuffle buffer.
                self._builder.add_block(self._shuffle_buffer)
            # Build the new shuffle buffer.
            self._shuffle_buffer = self._builder.build()
            self._shuffle_buffer = BlockAccessor.for_block(
                self._shuffle_buffer
            ).random_shuffle(self._shuffle_seed)
            if self._shuffle_seed is not None:
                # Advance the seed so successive compactions shuffle differently
                # while remaining deterministic overall.
                self._shuffle_seed += 1
            if (
                isinstance(
                    BlockAccessor.for_block(self._shuffle_buffer), ArrowBlockAccessor
                )
                and self._shuffle_buffer.num_columns > 0
                and self._shuffle_buffer.column(0).num_chunks
                >= MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS
            ):
                # Combine chunks so the batch slices below stay fast.
                self._shuffle_buffer = transform_pyarrow.combine_chunks(
                    self._shuffle_buffer
                )
            # Reset the builder.
            self._builder = DelegatingBlockBuilder()
            self._batch_head = 0

        assert self._shuffle_buffer is not None
        buffer_size = BlockAccessor.for_block(self._shuffle_buffer).num_rows()
        # Truncate the batch to the buffer size, if necessary.
        batch_size = min(self._batch_size, buffer_size)
        slice_start = self._batch_head
        self._batch_head += batch_size
        # Yield the shuffled batch.
        return BlockAccessor.for_block(self._shuffle_buffer).slice(
            slice_start, self._batch_head
        )
diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/block_builder.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/block_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3c232200a9d5ad18b8037b148f539aa6b2b721a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/block_builder.py
@@ -0,0 +1,39 @@
from typing import Generic

from ray.data.block import Block, BlockAccessor, BlockType, T


class BlockBuilder(Generic[T]):
    """A builder class for blocks."""

    @staticmethod
    def for_block(block: Block) -> "BlockBuilder":
        # Dispatch to the builder matching the given block's format.
        return BlockAccessor.for_block(block).builder()

    def add(self, item: T) -> None:
        """Append a single row to the block being built."""
        raise NotImplementedError

    def add_block(self, block: Block) -> None:
        """Append an entire block to the block being built."""
        raise NotImplementedError

    def
will_build_yield_copy(self) -> bool: + """Whether building this block will yield a new block copy.""" + raise NotImplementedError + + def build(self) -> Block: + """Build the block.""" + raise NotImplementedError + + def num_rows(self) -> int: + """Return the number of rows added in the block.""" + raise NotImplementedError + + def get_estimated_memory_usage(self) -> int: + """Return the estimated memory usage so far in bytes.""" + raise NotImplementedError + + def block_type(self) -> BlockType: + """Return the block type.""" + raise NotImplementedError diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/compute.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/compute.py new file mode 100644 index 0000000000000000000000000000000000000000..1590469d0325f3da64537c7d0ff1c18457a05bdb --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/compute.py @@ -0,0 +1,151 @@ +import logging +from typing import Any, Callable, Iterable, Optional, TypeVar, Union + +from ray.data._internal.execution.interfaces import TaskContext +from ray.data.block import Block, UserDefinedFunction +from ray.util.annotations import DeveloperAPI + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +U = TypeVar("U") + + +# Block transform function applied by task and actor pools. +BlockTransform = Union[ + # TODO(Clark): Once Ray only supports Python 3.8+, use protocol to constrain block + # transform type. + # Callable[[Block, ...], Iterable[Block]] + # Callable[[Block, UserDefinedFunction, ...], Iterable[Block]], + Callable[[Iterable[Block], TaskContext], Iterable[Block]], + Callable[[Iterable[Block], TaskContext, UserDefinedFunction], Iterable[Block]], + Callable[..., Iterable[Block]], +] + + +@DeveloperAPI +class ComputeStrategy: + pass + + +@DeveloperAPI +class TaskPoolStrategy(ComputeStrategy): + def __init__( + self, + size: Optional[int] = None, + ): + """Construct TaskPoolStrategy for a Dataset transform. 
+ + Args: + size: Specify the maximum size of the task pool. + """ + + if size is not None and size < 1: + raise ValueError("`size` must be >= 1", size) + self.size = size + + def __eq__(self, other: Any) -> bool: + return (isinstance(other, TaskPoolStrategy) and self.size == other.size) or ( + other == "tasks" and self.size is None + ) + + +class ActorPoolStrategy(ComputeStrategy): + """Specify the compute strategy for a Dataset transform. + + ActorPoolStrategy specifies that an autoscaling pool of actors should be used + for a given Dataset transform. This is useful for stateful setup of callable + classes. + + For a fixed-sized pool of size ``n``, specify ``compute=ActorPoolStrategy(size=n)``. + To autoscale from ``m`` to ``n`` actors, specify + ``ActorPoolStrategy(min_size=m, max_size=n)``. + + To increase opportunities for pipelining task dependency prefetching with + computation and avoiding actor startup delays, set max_tasks_in_flight_per_actor + to 2 or greater; to try to decrease the delay due to queueing of tasks on the worker + actors, set max_tasks_in_flight_per_actor to 1. + """ + + def __init__( + self, + *, + size: Optional[int] = None, + min_size: Optional[int] = None, + max_size: Optional[int] = None, + max_tasks_in_flight_per_actor: Optional[int] = None, + ): + """Construct ActorPoolStrategy for a Dataset transform. + + Args: + size: Specify a fixed size actor pool of this size. It is an error to + specify both `size` and `min_size` or `max_size`. + min_size: The minimize size of the actor pool. + max_size: The maximum size of the actor pool. + max_tasks_in_flight_per_actor: The maximum number of tasks to concurrently + send to a single actor worker. Increasing this will increase + opportunities for pipelining task dependency prefetching with + computation and avoiding actor startup delays, but will also increase + queueing delay. 
+ """ + if size is not None: + if size < 1: + raise ValueError("size must be >= 1", size) + if max_size is not None or min_size is not None: + raise ValueError( + "min_size and max_size cannot be set at the same time as `size`" + ) + min_size = size + max_size = size + if min_size is not None and min_size < 1: + raise ValueError("min_size must be >= 1", min_size) + if max_size is not None: + if min_size is None: + min_size = 1 # Legacy default. + if min_size > max_size: + raise ValueError("min_size must be <= max_size", min_size, max_size) + if ( + max_tasks_in_flight_per_actor is not None + and max_tasks_in_flight_per_actor < 1 + ): + raise ValueError( + "max_tasks_in_flight_per_actor must be >= 1, got: ", + max_tasks_in_flight_per_actor, + ) + self.min_size = min_size or 1 + self.max_size = max_size or float("inf") + self.max_tasks_in_flight_per_actor = max_tasks_in_flight_per_actor + self.num_workers = 0 + self.ready_to_total_workers_ratio = 0.8 + + def __eq__(self, other: Any) -> bool: + return isinstance(other, ActorPoolStrategy) and ( + self.min_size == other.min_size + and self.max_size == other.max_size + and self.max_tasks_in_flight_per_actor + == other.max_tasks_in_flight_per_actor + ) + + +def get_compute(compute_spec: Union[str, ComputeStrategy]) -> ComputeStrategy: + if not isinstance(compute_spec, (TaskPoolStrategy, ActorPoolStrategy)): + raise ValueError( + "In Ray 2.5, the compute spec must be either " + f"TaskPoolStrategy or ActorPoolStategy, was: {compute_spec}." 
+ ) + elif not compute_spec or compute_spec == "tasks": + return TaskPoolStrategy() + elif compute_spec == "actors": + return ActorPoolStrategy() + elif isinstance(compute_spec, ComputeStrategy): + return compute_spec + else: + raise ValueError("compute must be one of [`tasks`, `actors`, ComputeStrategy]") + + +def is_task_compute(compute_spec: Union[str, ComputeStrategy]) -> bool: + return ( + not compute_spec + or compute_spec == "tasks" + or isinstance(compute_spec, TaskPoolStrategy) + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/delegating_block_builder.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/delegating_block_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..4655a8e241485bfc5ed284c77164ecfe8a632ece --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/delegating_block_builder.py @@ -0,0 +1,76 @@ +import collections +from typing import Any, Mapping, Optional + +from ray.data._internal.arrow_block import ArrowBlockBuilder +from ray.data._internal.block_builder import BlockBuilder +from ray.data.block import Block, BlockAccessor, BlockType, DataBatch + + +class DelegatingBlockBuilder(BlockBuilder): + def __init__(self): + self._builder = None + self._empty_block = None + + @property + def _inferred_block_type(self) -> Optional[BlockType]: + """The block type inferred from the first item added to the builder.""" + if self._builder is not None: + return self._builder.block_type() + return None + + def add(self, item: Mapping[str, Any]) -> None: + assert isinstance(item, collections.abc.Mapping), item + + if self._builder is None: + self._builder = ArrowBlockBuilder() + + self._builder.add(item) + + def add_batch(self, batch: DataBatch): + """Add a user-facing data batch to the builder. + + This data batch will be converted to an internal block and then added to the + underlying builder. 
+ """ + block = BlockAccessor.batch_to_block(batch, self._inferred_block_type) + return self.add_block(block) + + def add_block(self, block: Block): + accessor = BlockAccessor.for_block(block) + if accessor.num_rows() == 0: + # Don't infer types of empty lists. Store the block and use it if no + # other data is added. https://github.com/ray-project/ray/issues/20290 + self._empty_block = block + return + if self._builder is None: + self._builder = accessor.builder() + else: + block_type = accessor.block_type() + assert block_type == self._inferred_block_type, ( + block_type, + self._inferred_block_type, + ) + + self._builder.add_block(accessor.to_block()) + + def will_build_yield_copy(self) -> bool: + if self._builder is None: + return True + return self._builder.will_build_yield_copy() + + def build(self) -> Block: + if self._builder is None: + if self._empty_block is not None: + self._builder = BlockAccessor.for_block(self._empty_block).builder() + self._builder.add_block(self._empty_block) + else: + self._builder = ArrowBlockBuilder() + return self._builder.build() + + def num_rows(self) -> int: + return self._builder.num_rows() if self._builder is not None else 0 + + def get_estimated_memory_usage(self) -> int: + if self._builder is None: + return 0 + return self._builder.get_estimated_memory_usage() diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/equalize.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/equalize.py new file mode 100644 index 0000000000000000000000000000000000000000..6279118ecb729016f989ea8e0c10e86719418fc7 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/equalize.py @@ -0,0 +1,142 @@ +from typing import List, Tuple + +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.split import _calculate_blocks_rows, _split_at_indices +from ray.data.block import Block, BlockMetadata, BlockPartition +from ray.types import ObjectRef + + +def 
_equalize( + per_split_bundles: List[RefBundle], + owned_by_consumer: bool, +) -> List[RefBundle]: + """Equalize split ref bundles into equal number of rows. + + Args: + per_split_bundles: ref bundles to equalize. + Returns: + the equalized ref bundles. + """ + if len(per_split_bundles) == 0: + return per_split_bundles + per_split_blocks_with_metadata = [bundle.blocks for bundle in per_split_bundles] + per_split_num_rows: List[List[int]] = [ + _calculate_blocks_rows(split) for split in per_split_blocks_with_metadata + ] + total_rows = sum([sum(blocks_rows) for blocks_rows in per_split_num_rows]) + target_split_size = total_rows // len(per_split_blocks_with_metadata) + + # phase 1: shave the current splits by dropping blocks (into leftovers) + # and calculate num rows needed to the meet target. + shaved_splits, per_split_needed_rows, leftovers = _shave_all_splits( + per_split_blocks_with_metadata, per_split_num_rows, target_split_size + ) + + # validate invariants + for shaved_split, split_needed_row in zip(shaved_splits, per_split_needed_rows): + num_shaved_rows = sum([meta.num_rows for _, meta in shaved_split]) + assert num_shaved_rows <= target_split_size + assert num_shaved_rows + split_needed_row == target_split_size + + # phase 2: based on the num rows needed for each shaved split, split the leftovers + # in the shape that exactly matches the rows needed. + leftover_bundle = RefBundle(leftovers, owns_blocks=owned_by_consumer) + leftover_splits = _split_leftovers(leftover_bundle, per_split_needed_rows) + + # phase 3: merge the shaved_splits and leftoever splits and return. + for i, leftover_split in enumerate(leftover_splits): + shaved_splits[i].extend(leftover_split) + + # validate invariants. 
+ num_shaved_rows = sum([meta.num_rows for _, meta in shaved_splits[i]]) + assert num_shaved_rows == target_split_size + + # Compose the result back to RefBundle + equalized_ref_bundles: List[RefBundle] = [] + for split in shaved_splits: + equalized_ref_bundles.append(RefBundle(split, owns_blocks=owned_by_consumer)) + return equalized_ref_bundles + + +def _shave_one_split( + split: BlockPartition, num_rows_per_block: List[int], target_size: int +) -> Tuple[BlockPartition, int, BlockPartition]: + """Shave a block list to the target size. + + Args: + split: the block list to shave. + num_rows_per_block: num rows for each block in the list. + target_size: the upper bound target size of the shaved list. + Returns: + A tuple of: + - shaved block list. + - num of rows needed for the block list to meet the target size. + - leftover blocks. + + """ + # iterates through the blocks from the input list and + shaved = [] + leftovers = [] + shaved_rows = 0 + for block_with_meta, block_rows in zip(split, num_rows_per_block): + if block_rows + shaved_rows <= target_size: + shaved.append(block_with_meta) + shaved_rows += block_rows + else: + leftovers.append(block_with_meta) + num_rows_needed = target_size - shaved_rows + return shaved, num_rows_needed, leftovers + + +def _shave_all_splits( + input_splits: List[BlockPartition], + per_split_num_rows: List[List[int]], + target_size: int, +) -> Tuple[List[BlockPartition], List[int], BlockPartition]: + """Shave all block list to the target size. + + Args: + input_splits: all block list to shave. + input_splits: num rows (per block) for each block list. + target_size: the upper bound target size of the shaved lists. + Returns: + A tuple of: + - all shaved block list. + - num of rows needed for the block list to meet the target size. + - leftover blocks. 
+ """ + shaved_splits = [] + per_split_needed_rows = [] + leftovers = [] + + for split, num_rows_per_block in zip(input_splits, per_split_num_rows): + shaved, num_rows_needed, _leftovers = _shave_one_split( + split, num_rows_per_block, target_size + ) + shaved_splits.append(shaved) + per_split_needed_rows.append(num_rows_needed) + leftovers.extend(_leftovers) + + return shaved_splits, per_split_needed_rows, leftovers + + +def _split_leftovers( + leftovers: RefBundle, per_split_needed_rows: List[int] +) -> List[BlockPartition]: + """Split leftover blocks by the num of rows needed.""" + num_splits = len(per_split_needed_rows) + split_indices = [] + prev = 0 + for i, num_rows_needed in enumerate(per_split_needed_rows): + split_indices.append(prev + num_rows_needed) + prev = split_indices[i] + split_result: Tuple[ + List[List[ObjectRef[Block]]], List[List[BlockMetadata]] + ] = _split_at_indices( + leftovers.blocks, + split_indices, + leftovers.owns_blocks, + ) + return [list(zip(block_refs, meta)) for block_refs, meta in zip(*split_result)][ + :num_splits + ] diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/logging.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6eb6b7fab862e51c230edc2c47d4aacb91ed62 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/logging.py @@ -0,0 +1,208 @@ +import logging +import logging.config +import os +from typing import Optional + +import yaml + +import ray + +DEFAULT_CONFIG = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "ray": { + "format": "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s" # noqa: E501 + }, + "ray_json": {"class": "ray._private.ray_logging.formatters.JSONFormatter"}, + }, + "filters": { + "console_filter": {"()": "ray.data._internal.logging.HiddenRecordFilter"}, + "core_context_filter": { + "()": 
"ray._private.ray_logging.filters.CoreContextFilter" + }, + }, + "handlers": { + "file": { + "class": "ray.data._internal.logging.SessionFileHandler", + "formatter": "ray", + "filename": "ray-data.log", + }, + "file_json": { + "class": "ray.data._internal.logging.SessionFileHandler", + "formatter": "ray_json", + "filename": "ray-data.log", + "filters": ["core_context_filter"], + }, + "console": { + "class": "ray._private.log.PlainRayHandler", + "formatter": "ray", + "level": "INFO", + "filters": ["console_filter"], + }, + }, + "loggers": { + "ray.data": { + "level": "DEBUG", + "handlers": ["file", "console"], + "propagate": False, + }, + "ray.air.util.tensor_extensions": { + "level": "DEBUG", + "handlers": ["file", "console"], + "propagate": False, + }, + }, +} + +# Dictionary of substitutions to be performed when using JSON mode. Handlers with names +# corresponding to keys will be replaced by those corresponding to values. +RAY_DATA_LOG_HANDLER_JSON_SUBSTITUTIONS = {"file": "file_json"} + +# Env. variable to specify the encoding of the file logs when using the default config. +RAY_DATA_LOG_ENCODING_ENV_VAR_NAME = "RAY_DATA_LOG_ENCODING" + +# Env. variable to specify the logging config path use defaults if not set +RAY_DATA_LOGGING_CONFIG_ENV_VAR_NAME = "RAY_DATA_LOGGING_CONFIG" + +# To facilitate debugging, Ray Data writes debug logs to a file. However, if Ray Data +# logs every scheduler loop, logging might impact performance. So, we add a "TRACE" +# level where logs aren't written by default. +# +# Use the following code to log a message at the "TRACE" level: +# ``` +# logger.log(logging.getLevelName("TRACE"), "Your message here.") +# ```` +logging.addLevelName(logging.DEBUG - 1, "TRACE") + + +class HiddenRecordFilter: + """Filters out log records with the "hide" attribute set to True. + + This filter allows you to override default logging behavior. 
For example, if errors + are printed by default, and you don't want to print a specific error, you can set + the "hide" attribute to avoid printing the message. + + .. testcode:: + + import logging + logger = logging.getLogger("ray.data.spam") + + # This warning won't be printed to the console. + logger.warning("ham", extra={"hide": True}) + """ + + def filter(self, record): + return not getattr(record, "hide", False) + + +class SessionFileHandler(logging.Handler): + """A handler that writes to a log file in the Ray session directory. + + The Ray session directory isn't available until Ray is initialized, so this handler + lazily creates the file handler when you emit a log record. + + Args: + filename: The name of the log file. The file is created in the 'logs' directory + of the Ray session directory. + """ + + def __init__(self, filename: str): + super().__init__() + self._filename = filename + self._handler = None + self._formatter = None + self._path = None + + def emit(self, record): + if self._handler is None: + self._try_create_handler() + if self._handler is not None: + self._handler.emit(record) + + def setFormatter(self, fmt: logging.Formatter) -> None: + if self._handler is not None: + self._handler.setFormatter(fmt) + self._formatter = fmt + + def _try_create_handler(self): + assert self._handler is None + + log_directory = get_log_directory() + if log_directory is None: + return + + os.makedirs(log_directory, exist_ok=True) + + self._path = os.path.join(log_directory, self._filename) + self._handler = logging.FileHandler(self._path) + if self._formatter is not None: + self._handler.setFormatter(self._formatter) + + +def configure_logging() -> None: + """Configure the Python logger named 'ray.data'. + + This function loads the configration YAML specified by "RAY_DATA_LOGGING_CONFIG" + environment variable. If the variable isn't set, this function loads the default + "logging.yaml" file that is adjacent to this module. 
+ + If "RAY_DATA_LOG_ENCODING" is specified as "JSON" we will enable JSON logging mode + if using the default logging config. + """ + + def _load_logging_config(config_path: str): + with open(config_path) as file: + config = yaml.safe_load(file) + return config + + # Dynamically load env vars + config_path = os.environ.get(RAY_DATA_LOGGING_CONFIG_ENV_VAR_NAME) + log_encoding = os.environ.get(RAY_DATA_LOG_ENCODING_ENV_VAR_NAME) + + if config_path is not None: + config = _load_logging_config(config_path) + else: + config = DEFAULT_CONFIG + if log_encoding is not None and log_encoding.upper() == "JSON": + for logger in config["loggers"].values(): + for ( + old_handler_name, + new_handler_name, + ) in RAY_DATA_LOG_HANDLER_JSON_SUBSTITUTIONS.items(): + logger["handlers"].remove(old_handler_name) + logger["handlers"].append(new_handler_name) + + logging.config.dictConfig(config) + + # After configuring logger, warn if RAY_DATA_LOGGING_CONFIG is used with + # RAY_DATA_LOG_ENCODING, because they are not both supported together. + if config_path is not None and log_encoding is not None: + logger = logging.getLogger(__name__) + logger.warning( + "Using `RAY_DATA_LOG_ENCODING` is not supported with " + + "`RAY_DATA_LOGGING_CONFIG`" + ) + + +def reset_logging() -> None: + """Reset the logger named 'ray.data' to its initial state. + + Used for testing. + """ + logger = logging.getLogger("ray.data") + logger.handlers.clear() + logger.setLevel(logging.NOTSET) + + +def get_log_directory() -> Optional[str]: + """Return the directory where Ray Data writes log files. + + If Ray isn't initialized, this function returns ``None``. 
+ """ + global_node = ray._private.worker._global_node + if global_node is None: + return None + + session_dir = global_node.get_session_dir_path() + return os.path.join(session_dir, "logs", "ray-data") diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..f44c648452adb6d7ad2d157fa72f6af8fb6296c6 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py @@ -0,0 +1,147 @@ +"""Utility for debugging object store memory eager deletion in Datasets. + +NOTE: the performance overhead of tracing object allocation is fairly substantial. +This is meant to use in unit test for debugging. Please do not enable in production, +without performance optimization. + +Enable with RAY_DATA_TRACE_ALLOCATIONS=1. + +Basic usage is to call `trace_allocation` each time a new object is created, and call +`trace_deallocation` when an object should be disposed of. When the workload is +complete, call `leak_report` to view possibly leaked objects. + +Note that so called "leaked" objects will be reclaimed eventually by reference counting +in Ray. This is just to debug the eager deletion protocol which is more efficient. +""" + +from io import StringIO +from typing import Dict, List + +import ray +from ray.data.context import DataContext + + +def trace_allocation(ref: ray.ObjectRef, loc: str) -> None: + """Record that an object has been created. + + Args: + ref: The object created. + loc: A human-readable string identifying the call site. + """ + ctx = DataContext.get_current() + if ctx.trace_allocations: + tracer = _get_mem_actor() + # TODO: it would be nice to determine loc automatically based on the stack. 
+ ray.get(tracer.trace_alloc.remote([ref], loc)) + + +def trace_deallocation(ref: ray.ObjectRef, loc: str, free: bool = True) -> None: + """Record that an object has been deleted (and delete if free=True). + + Args: + ref: The object we no longer need. + loc: A human-readable string identifying the call site. + free: Whether to eagerly destroy the object instead of waiting for Ray + reference counting to kick in. + """ + if free: + ray._private.internal_api.free(ref, local_only=False) + ctx = DataContext.get_current() + if ctx.trace_allocations: + tracer = _get_mem_actor() + ray.get(tracer.trace_dealloc.remote([ref], loc, free)) + + +def leak_report() -> str: + tracer = _get_mem_actor() + return ray.get(tracer.leak_report.remote()) + + +@ray.remote(num_cpus=0) +class _MemActor: + def __init__(self): + self.allocated: Dict[ray.ObjectRef, dict] = {} + self.deallocated: Dict[ray.ObjectRef, dict] = {} + self.skip_dealloc: Dict[ray.ObjectRef, str] = {} + self.peak_mem = 0 + self.cur_mem = 0 + + def trace_alloc(self, ref: List[ray.ObjectRef], loc: str): + ref = ref[0] # Avoid Ray materializing the ref. + if ref not in self.allocated: + meta = ray.experimental.get_object_locations([ref]) + size_bytes = meta.get("object_size", 0) + if not size_bytes: + size_bytes = -1 + from ray import cloudpickle as pickle + + try: + obj = ray.get(ref, timeout=5.0) + size_bytes = len(pickle.dumps(obj)) + except Exception: + print("[mem_tracing] ERROR getting size") + size_bytes = -1 + print(f"[mem_tracing] Allocated {size_bytes} bytes at {loc}: {ref}") + entry = { + "size_bytes": size_bytes, + "loc": loc, + } + self.allocated[ref] = entry + self.cur_mem += size_bytes + self.peak_mem = max(self.cur_mem, self.peak_mem) + + def trace_dealloc(self, ref: List[ray.ObjectRef], loc: str, freed: bool): + ref = ref[0] # Avoid Ray materializing the ref. 
+ size_bytes = self.allocated.get(ref, {}).get("size_bytes", 0) + if freed: + print(f"[mem_tracing] Freed {size_bytes} bytes at {loc}: {ref}") + if ref in self.allocated: + self.cur_mem -= size_bytes + self.deallocated[ref] = self.allocated.pop(ref) + self.deallocated[ref]["dealloc_loc"] = loc + if ref in self.deallocated: + # This object reference is already deallocated. + pass + else: + print(f"[mem_tracing] WARNING: allocation of {ref} was not traced!") + else: + print(f"[mem_tracing] Skipped freeing {size_bytes} bytes at {loc}: {ref}") + self.skip_dealloc[ref] = loc + + def leak_report(self) -> str: + output = StringIO() + output.write("[mem_tracing] ===== Leaked objects =====\n") + for ref in self.allocated: + size_bytes = self.allocated[ref].get("size_bytes") + loc = self.allocated[ref].get("loc") + if ref in self.skip_dealloc: + dealloc_loc = self.skip_dealloc[ref] + output.write( + f"[mem_tracing] Leaked object, created at {loc}, size " + f"{size_bytes}, skipped dealloc at {dealloc_loc}: {ref}\n" + ) + else: + output.write( + f"[mem_tracing] Leaked object, created at {loc}, " + f"size {size_bytes}: {ref}\n" + ) + output.write("[mem_tracing] ===== End leaked objects =====\n") + output.write("[mem_tracing] ===== Freed objects =====\n") + for ref in self.deallocated: + size_bytes = self.deallocated[ref].get("size_bytes") + loc = self.deallocated[ref].get("loc") + dealloc_loc = self.deallocated[ref].get("dealloc_loc") + output.write( + f"[mem_tracing] Freed object from {loc} at {dealloc_loc}, " + f"size {size_bytes}: {ref}\n" + ) + output.write("[mem_tracing] ===== End freed objects =====\n") + output.write(f"[mem_tracing] Peak size bytes {self.peak_mem}\n") + output.write(f"[mem_tracing] Current size bytes {self.cur_mem}\n") + return output.getvalue() + + +def _get_mem_actor(): + return _MemActor.options( + name="mem_tracing_actor", get_if_exists=True, lifetime="detached" + ).remote() diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/null_aggregate.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/null_aggregate.py new file mode 100644 index 0000000000000000000000000000000000000000..afd276594a9e0b8f68c0ea0d96e70cc05a40fc8e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/null_aggregate.py @@ -0,0 +1,276 @@ +from types import ModuleType +from typing import Any, Callable, Tuple, Union + +import numpy as np + +from ray.data.block import AggType, Block, KeyType, T, U + +WrappedAggType = Tuple[AggType, int] + + +# This module contains aggregation helpers for handling nulls. +# The null handling policy is: +# 1. Mix of values and nulls - ignore_nulls=True: Ignore the nulls, return +# aggregation of non-null values. +# 2. Mix of values and nulls - ignore_nulls=False: Return None. +# 3. All nulls: Return None. +# 4. Empty dataset: Return None. +# +# This is accomplished by checking rows for null values and by propagating nulls +# if found AND if we're not ignoring them. If not ignoring nulls, in order to delineate +# between found null rows and an empty block accumulation when merging (the latter of +# which we want to propagate; the former of which we do not), we attach a boolean flag +# indicating whether or not an accumulation contains valid data to intermediate block +# accumulations via _wrap_acc() and _unwrap_acc(). This allows us to properly merge +# intermediate block accumulations under a streaming constraint. + + +def _wrap_acc(a: AggType, has_data: bool) -> WrappedAggType: + """ + Wrap accumulation with a numeric boolean flag indicating whether or not + this accumulation contains real data; if it doesn't, we consider it to be + empty. + + Args: + a: The accumulation value. + has_data: Whether the accumulation contains real data. + + Returns: + An AggType list with the last element being a numeric boolean flag indicating + whether or not this accumulation contains real data. 
If the input a has length + n, the returned AggType has length n + 1. + """ + if not isinstance(a, list): + a = [a] + return a + [1 if has_data else 0] + + +def _unwrap_acc(a: WrappedAggType) -> Tuple[AggType, bool]: + """ + Unwrap the accumulation, which we assume has been wrapped (via _wrap_acc) with a + numeric boolean flag indicating whether or not this accumulation contains real data. + + Args: + a: The wrapped accumulation value that we wish to unwrap. + + Returns: + A tuple containing the unwrapped accumulation value and a boolean indicating + whether the accumulation contains real data. + """ + has_data = a[-1] == 1 + a = a[:-1] + if len(a) == 1: + a = a[0] + return a, has_data + + +def _null_wrap_init( + init: Callable[[KeyType], AggType] +) -> Callable[[KeyType], WrappedAggType]: + """ + Wraps an accumulation initializer with null handling. + + The returned initializer function adds on a has_data field that the accumulator + uses to track whether an aggregation is empty. + + Args: + init: The core init function to wrap. + + Returns: + A new accumulation initializer function that can handle nulls. + """ + + def _init(k: KeyType) -> AggType: + a = init(k) + # Initializing accumulation, so indicate that the accumulation doesn't represent + # real data yet. + return _wrap_acc(a, has_data=False) + + return _init + + +def _null_wrap_merge( + ignore_nulls: bool, + merge: Callable[[AggType, AggType], AggType], +) -> Callable[[WrappedAggType, WrappedAggType], WrappedAggType]: + """ + Wrap merge function with null handling. + + The returned merge function expects a1 and a2 to be either None or of the form: + a = [acc_data_1, ..., acc_data_2, has_data]. + + This merges two accumulations subject to the following null rules: + 1. If a1 is empty and a2 is empty, return empty accumulation. + 2. If a1 (a2) is empty and a2 (a1) is None, return None. + 3. If a1 (a2) is empty and a2 (a1) is non-None, return a2 (a1). + 4. 
If a1 (a2) is None, return a2 (a1) if ignoring nulls, None otherwise. + 5. If a1 and a2 are both non-null, return merge(a1, a2). + + Args: + ignore_nulls: Whether nulls should be ignored or cause a None result. + merge: The core merge function to wrap. + + Returns: + A new merge function that handles nulls. + """ + + def _merge(a1: WrappedAggType, a2: WrappedAggType) -> WrappedAggType: + if a1 is None: + # If we're ignoring nulls, propagate a2; otherwise, propagate None. + return a2 if ignore_nulls else None + unwrapped_a1, a1_has_data = _unwrap_acc(a1) + if not a1_has_data: + # If a1 is empty, propagate a2. + # No matter whether a2 is a real value, empty, or None, + # propagating each of these is correct if a1 is empty. + return a2 + if a2 is None: + # If we're ignoring nulls, propagate a1; otherwise, propagate None. + return a1 if ignore_nulls else None + unwrapped_a2, a2_has_data = _unwrap_acc(a2) + if not a2_has_data: + # If a2 is empty, propagate a1. + return a1 + a = merge(unwrapped_a1, unwrapped_a2) + return _wrap_acc(a, has_data=True) + + return _merge + + +def _null_wrap_accumulate_row( + ignore_nulls: bool, + on_fn: Callable[[T], T], + accum: Callable[[AggType, T], AggType], +) -> Callable[[WrappedAggType, T], WrappedAggType]: + """ + Wrap accumulator function with null handling. + + The returned accumulate function expects a to be either None or of the form: + a = [acc_data_1, ..., acc_data_n, has_data]. + + This performs an accumulation subject to the following null rules: + 1. If r is null and ignore_nulls=False, return None. + 2. If r is null and ignore_nulls=True, return a. + 3. If r is non-null and a is None, return None. + 4. If r is non-null and a is non-None, return accum(a[:-1], r). + + Args: + ignore_nulls: Whether nulls should be ignored or cause a None result. + on_fn: Function selecting a subset of the row to apply the aggregation. + accum: The core accumulator function to wrap. + + Returns: + A new accumulator function that handles nulls. 
+ """ + + def _accum(a: WrappedAggType, r: T) -> WrappedAggType: + r = on_fn(r) + if _is_null(r): + if ignore_nulls: + # Ignoring nulls, return the current accumulation, ignoring r. + return a + else: + # Not ignoring nulls, so propagate the null. + return None + else: + if a is None: + # Accumulation is None so (1) a previous row must have been null, and + # (2) we must be propagating nulls, so continue to pragate this null. + return None + else: + # Row is non-null and accumulation is non-null, so we now apply the core + # accumulation. + a, _ = _unwrap_acc(a) + a = accum(a, r) + return _wrap_acc(a, has_data=True) + + return _accum + + +def _null_wrap_accumulate_block( + ignore_nulls: bool, + accum_block: Callable[[AggType, Block], AggType], + null_merge: Callable[[WrappedAggType, WrappedAggType], WrappedAggType], +) -> Callable[[WrappedAggType, Block], WrappedAggType]: + """ + Wrap vectorized aggregate function with null handling. + + This performs a block accumulation subject to the following null rules: + 1. If any row is null and ignore_nulls=False, return None. + 2. If at least one row is not null and ignore_nulls=True, return the block + accumulation. + 3. If all rows are null and ignore_nulls=True, return the base accumulation. + 4. If all rows non-null, return the block accumulation. + + Args: + ignore_nulls: Whether nulls should be ignored or cause a None result. + accum_block: The core vectorized aggregate function to wrap. + null_merge: A null-handling merge, as returned from _null_wrap_merge(). + + Returns: + A new vectorized aggregate function that handles nulls. + """ + + def _accum_block_null(a: WrappedAggType, block: Block) -> WrappedAggType: + ret = accum_block(block) + if ret is not None: + ret = _wrap_acc(ret, has_data=True) + elif ignore_nulls: + # This can happen if we're ignoring nulls but the entire block only consists + # of nulls. We treat the block as if it were empty in this case. 
+ ret = a + return null_merge(a, ret) + + return _accum_block_null + + +def _null_wrap_finalize( + finalize: Callable[[AggType], AggType] +) -> Callable[[WrappedAggType], U]: + """ + Wrap finalizer with null handling. + + If the accumulation is empty or None, the returned finalizer returns None. + + Args: + finalize: The core finalizing function to wrap. + + Returns: + A new finalizing function that handles nulls. + """ + + def _finalize(a: AggType) -> U: + if a is None: + return None + a, has_data = _unwrap_acc(a) + if not has_data: + return None + return finalize(a) + + return _finalize + + +LazyModule = Union[None, bool, ModuleType] +_pandas: LazyModule = None + + +def _lazy_import_pandas() -> LazyModule: + global _pandas + if _pandas is None: + try: + import pandas as _pandas + except ModuleNotFoundError: + # If module is not found, set _pandas to False so we won't + # keep trying to import it on every _lazy_import_pandas() call. + _pandas = False + return _pandas + + +def _is_null(r: Any): + pd = _lazy_import_pandas() + if pd: + return pd.isnull(r) + try: + return np.isnan(r) + except TypeError: + return r is None diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/output_buffer.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/output_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..4355b6e0a233bc119135470529f0a1e7230f9df4 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/output_buffer.py @@ -0,0 +1,109 @@ +from typing import Any + +from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder +from ray.data.block import Block, BlockAccessor, DataBatch +from ray.data.context import MAX_SAFE_BLOCK_SIZE_FACTOR + + +class BlockOutputBuffer: + """Generates output blocks of a given size given a stream of inputs. + + This class is used to turn a stream of items / blocks of arbitrary size + into a stream of blocks of ``target_max_block_size``. 
class BlockOutputBuffer:
    """Re-chunks a stream of items/blocks into blocks of ``target_max_block_size``.

    Callers should check ``has_next()`` after each ``add*()`` call and drain
    ready blocks with ``next()``. Once the input stream is exhausted, call
    ``finalize()`` and drain one last time.

    Examples:
        >>> from ray.data._internal.output_buffer import BlockOutputBuffer
        >>> output = BlockOutputBuffer(500 * 1024 * 1024)  # doctest: +SKIP
        >>> for item in generator():  # doctest: +SKIP
        ...     output.add(item)  # doctest: +SKIP
        ...     if output.has_next():  # doctest: +SKIP
        ...         yield output.next()  # doctest: +SKIP
        >>> output.finalize()  # doctest: +SKIP
        >>> if output.has_next():  # doctest: +SKIP
        ...     yield output.next()  # doctest: +SKIP
    """

    def __init__(self, target_max_block_size: int):
        self._target_max_block_size = target_max_block_size
        self._buffer = DelegatingBlockBuilder()
        self._returned_at_least_one_block = False
        self._finalized = False

    def add(self, item: "Any") -> None:
        """Append a single item to this output buffer."""
        assert not self._finalized
        self._buffer.add(item)

    def add_batch(self, batch: "DataBatch") -> None:
        """Append a data batch to this output buffer."""
        assert not self._finalized
        self._buffer.add_batch(batch)

    def add_block(self, block: "Block") -> None:
        """Append a whole data block to this output buffer."""
        assert not self._finalized
        self._buffer.add_block(block)

    def finalize(self) -> None:
        """Mark the input stream as exhausted; call exactly once."""
        assert not self._finalized
        self._finalized = True

    def has_next(self) -> bool:
        """Return whether a complete output block is ready."""
        if not self._finalized:
            return (
                self._buffer.get_estimated_memory_usage() > self._target_max_block_size
            )
        # After finalization, always emit at least one (possibly empty) block,
        # then keep emitting while buffered rows remain.
        return not self._returned_at_least_one_block or self._buffer.num_rows() > 0

    def next(self) -> "Block":
        """Pop and return the next complete output block."""
        assert self.has_next()

        block_to_yield = self._buffer.build()
        block_remainder = None
        accessor = BlockAccessor.for_block(block_to_yield)
        if (
            accessor.size_bytes()
            >= MAX_SAFE_BLOCK_SIZE_FACTOR * self._target_max_block_size
        ):
            # Only slice when we are well above the target block size; this
            # guarantees the final sliced block is still a sizable fraction of
            # the target size.
            num_bytes_per_row = accessor.size_bytes() // accessor.num_rows()
            target_num_rows = max(1, self._target_max_block_size // num_bytes_per_row)

            if target_num_rows < accessor.num_rows():
                # NOTE: We're maintaining following protocol of slicing underlying
                # block into appropriately sized ones:
                #
                #   - (Finalized) Target blocks sliced from the original one
                #     and are *copied* to avoid referencing original blocks
                #   - Temporary remainder of the block should *NOT* be copied
                #     such as to avoid repeatedly copying the remainder bytes
                #     of the block, resulting in O(M * N) total bytes being
                #     copied, where N is the total number of bytes in the
                #     original block and M is the number of blocks that will
                #     be produced by this iterator
                block_remainder = accessor.slice(
                    target_num_rows, accessor.num_rows(), copy=False
                )
                block_to_yield = accessor.slice(0, target_num_rows, copy=True)

        self._buffer = DelegatingBlockBuilder()
        if block_remainder is not None:
            self._buffer.add_block(block_remainder)

        self._returned_at_least_one_block = True
        return block_to_yield
_pandas = None


def lazy_import_pandas():
    """Import pandas lazily, caching the module after the first import."""
    global _pandas
    if _pandas is None:
        import pandas

        _pandas = pandas
    return _pandas


class PandasRow(TableRow):
    """
    Row of a tabular Dataset backed by a Pandas DataFrame block.
    """

    def __getitem__(self, key: "Union[str, List[str]]") -> "Any":
        from ray.data.extensions import TensorArrayElement

        pd = lazy_import_pandas()

        def fetch(col_names: "List[str]") -> "Any":
            frame = self._row[col_names]
            if len(frame) == 0:
                return None

            values = frame.iloc[0]
            if isinstance(values.iloc[0], TensorArrayElement):
                # Getting an item in a Pandas tensor column may return
                # a TensorArrayElement, which we have to convert to an ndarray.
                return pd.Series(v.to_numpy() for v in values)

            try:
                # Try to interpret this as a numpy-type value.
                # See https://stackoverflow.com/questions/9452775/converting-numpy-dtypes-to-native-python-types.  # noqa: E501
                return pd.Series(v.as_py() for v in values)
            except (AttributeError, ValueError):
                # Fallback to the original form.
                return values

        single = isinstance(key, str)
        fetched = fetch([key] if single else key)

        if fetched is None:
            return None
        return fetched.iloc[0] if single else fetched

    def __iter__(self) -> "Iterator":
        yield from self._row.columns

    def __len__(self):
        # Number of columns in this single-row view.
        return self._row.shape[1]


class PandasBlockBuilder(TableBlockBuilder):
    def __init__(self):
        pandas = lazy_import_pandas()
        super().__init__(pandas.DataFrame)

    @staticmethod
    def _table_from_pydict(columns: "Dict[str, List[Any]]") -> "pandas.DataFrame":
        pandas = lazy_import_pandas()

        frame_columns: "Dict[str, Any]" = {}
        for name, values in columns.items():
            as_numpy = convert_to_numpy(values)
            if name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(as_numpy):
                from ray.data.extensions.tensor_extension import TensorArray

                frame_columns[name] = TensorArray(as_numpy)
            else:
                frame_columns[name] = as_numpy

        return pandas.DataFrame(frame_columns)

    @staticmethod
    def _concat_tables(tables: "List[pandas.DataFrame]") -> "pandas.DataFrame":
        pandas = lazy_import_pandas()
        from ray.air.util.data_batch_conversion import (
            _cast_ndarray_columns_to_tensor_extension,
        )

        if len(tables) > 1:
            df = pandas.concat(tables, ignore_index=True)
            df.reset_index(drop=True, inplace=True)
        else:
            df = tables[0]
        ctx = DataContext.get_current()
        if ctx.enable_tensor_extension_casting:
            df = _cast_ndarray_columns_to_tensor_extension(df)
        return df

    @staticmethod
    def _concat_would_copy() -> bool:
        return True

    @staticmethod
    def _empty_table() -> "pandas.DataFrame":
        return lazy_import_pandas().DataFrame()

    def block_type(self) -> "BlockType":
        return BlockType.PANDAS
# This is to be compatible with pyarrow.lib.schema
# TODO (kfstorm): We need a format-independent way to represent schema.
PandasBlockSchema = collections.namedtuple("PandasBlockSchema", ["names", "types"])


class PandasBlockAccessor(TableBlockAccessor):
    ROW_TYPE = PandasRow

    def __init__(self, table: "pandas.DataFrame"):
        super().__init__(table)

    def column_names(self) -> "List[str]":
        """Return the block's column names as a list."""
        return list(self._table.columns)

    def append_column(self, name: str, data: "Any") -> "Block":
        assert name not in self._table.columns

        if any(isinstance(item, np.ndarray) for item in data):
            raise NotImplementedError(
                f"`{self.__class__.__name__}.append_column()` doesn't support "
                "array-like data."
            )

        appended = self._table.copy()
        appended[name] = data
        return appended

    @staticmethod
    def _build_tensor_row(row: "PandasRow") -> np.ndarray:
        from ray.data.extensions import TensorArrayElement

        tensor = row[TENSOR_COLUMN_NAME].iloc[0]
        if isinstance(tensor, TensorArrayElement):
            # A Pandas tensor column may hand back a TensorArrayElement, which
            # we convert to a plain ndarray.
            tensor = tensor.to_numpy()
        return tensor

    def slice(self, start: int, end: int, copy: bool = False) -> "pandas.DataFrame":
        """Return rows [start, end), optionally as a deep copy."""
        window = self._table[start:end]
        window.reset_index(drop=True, inplace=True)
        return window.copy(deep=True) if copy else window

    def take(self, indices: "List[int]") -> "pandas.DataFrame":
        picked = self._table.take(indices)
        picked.reset_index(drop=True, inplace=True)
        return picked

    def select(self, columns: "List[str]") -> "pandas.DataFrame":
        if not all(isinstance(col, str) for col in columns):
            raise ValueError(
                "Columns must be a list of column name strings when aggregating on "
                f"Pandas blocks, but got: {columns}."
            )
        return self._table[columns]

    def random_shuffle(self, random_seed: "Optional[int]") -> "pandas.DataFrame":
        shuffled = self._table.sample(frac=1, random_state=random_seed)
        shuffled.reset_index(drop=True, inplace=True)
        return shuffled

    def schema(self) -> "PandasBlockSchema":
        dtypes = self._table.dtypes
        block_schema = PandasBlockSchema(
            names=dtypes.index.tolist(), types=dtypes.values.tolist()
        )
        # Ray Dataset only supports string column names.
        if any(not isinstance(name, str) for name in block_schema.names):
            raise ValueError(
                "A Pandas DataFrame with column names of non-str types"
                " is not supported by Ray Dataset. Column names of this"
                f" DataFrame: {block_schema.names!r}."
            )
        return block_schema

    def to_pandas(self) -> "pandas.DataFrame":
        from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays

        table = self._table
        if DataContext.get_current().enable_tensor_extension_casting:
            table = _cast_tensor_columns_to_ndarrays(table)
        return table

    def to_numpy(
        self, columns: "Optional[Union[str, List[str]]]" = None
    ) -> "Union[np.ndarray, Dict[str, np.ndarray]]":
        if columns is None:
            columns = self._table.columns.tolist()
            squeeze = False
        elif isinstance(columns, list):
            squeeze = False
        else:
            columns = [columns]
            squeeze = True

        known = set(self._table.columns)
        for column in columns:
            if column not in known:
                raise ValueError(
                    f"Cannot find column {column}, available columns: "
                    f"{self._table.columns.tolist()}"
                )

        if squeeze:
            # Single column requested by name: return a bare ndarray.
            return self._table[columns[0]].to_numpy()
        return {col: self._table[col].to_numpy() for col in columns}
table. + return pyarrow.Table.from_pandas(self._table, preserve_index=False) + + @staticmethod + def numpy_to_block( + batch: Union[Dict[str, np.ndarray], Dict[str, list]], + ) -> "pandas.DataFrame": + validate_numpy_batch(batch) + + block = PandasBlockBuilder._table_from_pydict(batch) + return block + + def num_rows(self) -> int: + return self._table.shape[0] + + def size_bytes(self) -> int: + return int(self._table.memory_usage(index=True, deep=True).sum()) + + def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame": + r = self.to_pandas().copy(deep=False) + s = acc.to_pandas() + for col_name in s.columns: + col = s[col_name] + column_names = list(r.columns) + # Ensure the column names are unique after zip. + if col_name in column_names: + i = 1 + new_name = col_name + while new_name in column_names: + new_name = "{}_{}".format(col_name, i) + i += 1 + col_name = new_name + r[col_name] = col + return r + + @staticmethod + def builder() -> PandasBlockBuilder: + return PandasBlockBuilder() + + @staticmethod + def _empty_table() -> "pandas.DataFrame": + return PandasBlockBuilder._empty_table() + + def _sample(self, n_samples: int, sort_key: "SortKey") -> "pandas.DataFrame": + return self._table[sort_key.get_columns()].sample(n_samples, ignore_index=True) + + def _apply_agg( + self, agg_fn: Callable[["pandas.Series", bool], U], on: str + ) -> Optional[U]: + """Helper providing null handling around applying an aggregation to a column.""" + pd = lazy_import_pandas() + if on is not None and not isinstance(on, str): + raise ValueError( + "on must be a string or None when aggregating on Pandas blocks, but " + f"got: {type(on)}." + ) + + if self.num_rows() == 0: + return None + + col = self._table[on] + try: + val = agg_fn(col) + except TypeError as e: + # Converting an all-null column in an Arrow Table to a Pandas DataFrame + # column will result in an all-None column of object type, which will raise + # a type error when attempting to do most binary operations. 
We explicitly + # check for this type failure here so we can properly propagate a null. + if np.issubdtype(col.dtype, np.object_) and col.isnull().all(): + return None + raise e from None + if pd.isnull(val): + return None + return val + + def count(self, on: str) -> Optional[U]: + return self._apply_agg(lambda col: col.count(), on) + + def sum(self, on: str, ignore_nulls: bool) -> Optional[U]: + pd = lazy_import_pandas() + if on is not None and not isinstance(on, str): + raise ValueError( + "on must be a string or None when aggregating on Pandas blocks, but " + f"got: {type(on)}." + ) + + if self.num_rows() == 0: + return None + + col = self._table[on] + if col.isnull().all(): + # Short-circuit on an all-null column, returning None. This is required for + # sum() since it will otherwise return 0 when summing on an all-null column, + # which is not what we want. + return None + val = col.sum(skipna=ignore_nulls) + if pd.isnull(val): + return None + return val + + def min(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.min(skipna=ignore_nulls), on) + + def max(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.max(skipna=ignore_nulls), on) + + def mean(self, on: str, ignore_nulls: bool) -> Optional[U]: + return self._apply_agg(lambda col: col.mean(skipna=ignore_nulls), on) + + def sum_of_squared_diffs_from_mean( + self, + on: str, + ignore_nulls: bool, + mean: Optional[U] = None, + ) -> Optional[U]: + if mean is None: + mean = self.mean(on, ignore_nulls) + return self._apply_agg( + lambda col: ((col - mean) ** 2).sum(skipna=ignore_nulls), + on, + ) + + def sort_and_partition( + self, boundaries: List[T], sort_key: "SortKey" + ) -> List[Block]: + if self._table.shape[0] == 0: + # If the pyarrow table is empty we may not have schema + # so calling sort_indices() will raise an error. 
+ return [self._empty_table() for _ in range(len(boundaries) + 1)] + + columns, ascending = sort_key.to_pandas_sort_args() + table = self._table.sort_values(by=columns, ascending=ascending) + if len(boundaries) == 0: + return [table] + + return find_partitions(table, boundaries, sort_key) + + def combine( + self, sort_key: "SortKey", aggs: Tuple["AggregateFn"] + ) -> "pandas.DataFrame": + """Combine rows with the same key into an accumulator. + + This assumes the block is already sorted by key in ascending order. + + Args: + sort_key: A SortKey object which holds column names/keys. + If this is ``None``, place all rows in a single group. + + aggs: The aggregations to do. + + Returns: + A sorted block of [k, v_1, ..., v_n] columns where k is the groupby + key and v_i is the partially combined accumulator for the ith given + aggregation. + If key is None then the k column is omitted. + """ + keys: List[str] = sort_key.get_columns() + pd = lazy_import_pandas() + + def iter_groups() -> Iterator[Tuple[Sequence[KeyType], Block]]: + """Creates an iterator over zero-copy group views.""" + if not keys: + # Global aggregation consists of a single "group", so we short-circuit. + yield tuple(), self.to_block() + return + + start = end = 0 + iter = self.iter_rows(public_row_format=False) + next_row = None + while True: + try: + if next_row is None: + next_row = next(iter) + next_keys = next_row[keys] + while np.all(next_row[keys] == next_keys): + end += 1 + try: + next_row = next(iter) + except StopIteration: + next_row = None + break + if isinstance(next_keys, pd.Series): + next_keys = next_keys.values + yield next_keys, self.slice(start, end, copy=False) + start = end + except StopIteration: + break + + builder = PandasBlockBuilder() + for group_keys, group_view in iter_groups(): + # Aggregate. 
+ init_vals = group_keys + if len(group_keys) == 1: + init_vals = group_keys[0] + accumulators = [agg.init(init_vals) for agg in aggs] + for i in range(len(aggs)): + accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view) + + # Build the row. + row = {} + if keys: + for k, gk in zip(keys, group_keys): + row[k] = gk + + count = collections.defaultdict(int) + for agg, accumulator in zip(aggs, accumulators): + name = agg.name + # Check for conflicts with existing aggregation name. + if count[name] > 0: + name = self._munge_conflict(name, count[name]) + count[name] += 1 + row[name] = accumulator + + builder.add(row) + + return builder.build() + + @staticmethod + def merge_sorted_blocks( + blocks: List[Block], sort_key: "SortKey" + ) -> Tuple["pandas.DataFrame", BlockMetadata]: + pd = lazy_import_pandas() + stats = BlockExecStats.builder() + blocks = [b for b in blocks if b.shape[0] > 0] + if len(blocks) == 0: + ret = PandasBlockAccessor._empty_table() + else: + # Handle blocks of different types. + blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas") + ret = pd.concat(blocks, ignore_index=True) + columns, ascending = sort_key.to_pandas_sort_args() + ret = ret.sort_values(by=columns, ascending=ascending) + return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build()) + + @staticmethod + def aggregate_combined_blocks( + blocks: List["pandas.DataFrame"], + sort_key: "SortKey", + aggs: Tuple["AggregateFn"], + finalize: bool, + ) -> Tuple["pandas.DataFrame", BlockMetadata]: + """Aggregate sorted, partially combined blocks with the same key range. + + This assumes blocks are already sorted by key in ascending order, + so we can do merge sort to get all the rows with the same key. + + Args: + blocks: A list of partially combined and sorted blocks. + sort_key: The column name of key or None for global aggregation. + aggs: The aggregations to do. + finalize: Whether to finalize the aggregation. 
This is used as an + optimization for cases where we repeatedly combine partially + aggregated groups. + + Returns: + A block of [k, v_1, ..., v_n] columns and its metadata where k is + the groupby key and v_i is the corresponding aggregation result for + the ith given aggregation. + If key is None then the k column is omitted. + """ + + stats = BlockExecStats.builder() + keys = sort_key.get_columns() + + def key_fn(r): + if keys: + return tuple(r[keys]) + else: + return (0,) + + # Handle blocks of different types. + blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas") + + iter = heapq.merge( + *[ + PandasBlockAccessor(block).iter_rows(public_row_format=False) + for block in blocks + ], + key=key_fn, + ) + next_row = None + builder = PandasBlockBuilder() + while True: + try: + if next_row is None: + next_row = next(iter) + next_keys = key_fn(next_row) + next_key_columns = keys + + def gen(): + nonlocal iter + nonlocal next_row + while key_fn(next_row) == next_keys: + yield next_row + try: + next_row = next(iter) + except StopIteration: + next_row = None + break + + # Merge. + first = True + accumulators = [None] * len(aggs) + resolved_agg_names = [None] * len(aggs) + for r in gen(): + if first: + count = collections.defaultdict(int) + for i in range(len(aggs)): + name = aggs[i].name + # Check for conflicts with existing aggregation + # name. + if count[name] > 0: + name = PandasBlockAccessor._munge_conflict( + name, count[name] + ) + count[name] += 1 + resolved_agg_names[i] = name + accumulators[i] = r[name] + first = False + else: + for i in range(len(aggs)): + accumulators[i] = aggs[i].merge( + accumulators[i], r[resolved_agg_names[i]] + ) + # Build the row. 
+ row = {} + if keys: + for col_name, next_key in zip(next_key_columns, next_keys): + row[col_name] = next_key + + for agg, agg_name, accumulator in zip( + aggs, resolved_agg_names, accumulators + ): + if finalize: + row[agg_name] = agg.finalize(accumulator) + else: + row[agg_name] = accumulator + + builder.add(row) + except StopIteration: + break + + ret = builder.build() + return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build()) + + def block_type(self) -> BlockType: + return BlockType.PANDAS diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/plan.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/plan.py new file mode 100644 index 0000000000000000000000000000000000000000..40f24ea4326851317fd4df01f5b482110e505116 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/plan.py @@ -0,0 +1,602 @@ +import copy +import itertools +import logging +from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, Type, Union + +import pyarrow + +import ray +from ray._private.internal_api import get_memory_info_reply, get_state_from_address +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces.logical_operator import LogicalOperator +from ray.data._internal.logical.interfaces.logical_plan import LogicalPlan +from ray.data._internal.logical.operators.from_operators import AbstractFrom +from ray.data._internal.logical.operators.input_data_operator import InputData +from ray.data._internal.logical.operators.read_operator import Read +from ray.data._internal.stats import DatasetStats +from ray.data._internal.util import create_dataset_tag, unify_block_metadata_schema +from ray.data.block import BlockMetadata +from ray.data.context import DataContext +from ray.data.exceptions import omit_traceback_stdout +from ray.util.debug import log_once + +if TYPE_CHECKING: + + from ray.data._internal.execution.interfaces import Executor + from 
    from ray.data.dataset import Dataset


# Scheduling strategy can be inherited from prev operator if not specified.
INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"]


logger = logging.getLogger(__name__)


class ExecutionPlan:
    """A lazy execution plan for a Dataset.

    This lazy execution plan builds up a chain of ``List[RefBundle]`` -->
    ``List[RefBundle]`` operators. Prior to execution, we apply a set of logical
    plan optimizations, such as operator fusion, in order to reduce Ray task
    overhead and data copies.

    Internally, the execution plan holds a snapshot of a computed list of
    blocks and their associated metadata under ``self._snapshot_bundle``,
    where this snapshot is the cached output of executing the operator chain."""

    def __init__(
        self,
        stats: DatasetStats,
        *,
        data_context: Optional[DataContext] = None,
    ):
        """Create a plan with no transformation operators.

        Args:
            stats: Stats for the base blocks.
            data_context: :class:`~ray.data.context.DataContext`
                object to use for execution.
        """
        self._in_stats = stats
        # A computed snapshot of some prefix of operators and their corresponding
        # output blocks and stats.
        self._snapshot_operator: Optional[LogicalOperator] = None
        self._snapshot_stats = None
        self._snapshot_bundle = None
        # Snapshot of only metadata corresponding to the final operator's
        # output bundles, used as the source of truth for the Dataset's schema
        # and count. This is calculated and cached when the plan is executed as an
        # iterator (`execute_to_iterator()`), and avoids caching
        # all of the output blocks in memory like in `self.snapshot_bundle`.
        # TODO(scottjlee): To keep the caching logic consistent, update `execute()`
        # to also store the metadata in `_snapshot_metadata` instead of
        # `_snapshot_bundle`. For example, we could store the blocks in
        # `self._snapshot_blocks` and the metadata in `self._snapshot_metadata`.
        self._snapshot_metadata: Optional[BlockMetadata] = None

        # Cached schema.
        self._schema = None
        # Set when a Dataset is constructed with this plan
        self._dataset_uuid = None

        # Optional user-facing name, surfaced in the repr.
        self._dataset_name = None

        # True once execute()/execute_to_iterator() has been called.
        self._has_started_execution = False

        if data_context is None:
            # Snapshot the current context, so that the config of Datasets is always
            # determined by the config at the time it was created.
            self._context = copy.deepcopy(DataContext.get_current())
        else:
            self._context = data_context

    def __repr__(self) -> str:
        # Debug-oriented repr; get_plan_as_string() is the user-facing one.
        return (
            f"ExecutionPlan("
            f"dataset_uuid={self._dataset_uuid}, "
            f"snapshot_operator={self._snapshot_operator}"
            f")"
        )

    def get_plan_as_string(self, dataset_cls: Type["Dataset"]) -> str:
        """Create a cosmetic string representation of this execution plan.

        Returns:
            The string representation of this execution plan.
        """
        # NOTE: this is used for Dataset.__repr__ to give a user-facing string
        # representation. Ideally ExecutionPlan.__repr__ should be replaced with this
        # method as well.

        from ray.data.dataset import MaterializedDataset

        # Do not force execution for schema, as this method is expected to be very
        # cheap.
        plan_str = ""
        plan_max_depth = 0
        if not self.has_computed_output():

            def generate_logical_plan_string(
                op: LogicalOperator,
                curr_str: str = "",
                depth: int = 0,
            ):
                """Traverse (DFS) the LogicalPlan DAG and
                return a string representation of the operators."""
                if isinstance(op, (Read, InputData, AbstractFrom)):
                    # Source operators are rendered via the dataset line, not here.
                    return curr_str, depth

                curr_max_depth = depth
                op_name = op.name
                if depth == 0:
                    curr_str += f"{op_name}\n"
                else:
                    trailing_space = " " * ((depth - 1) * 3)
                    curr_str += f"{trailing_space}+- {op_name}\n"

                for input in op.input_dependencies:
                    curr_str, input_max_depth = generate_logical_plan_string(
                        input, curr_str, depth + 1
                    )
                    curr_max_depth = max(curr_max_depth, input_max_depth)
                return curr_str, curr_max_depth

            # generate_logical_plan_string(self._logical_plan.dag)
            plan_str, plan_max_depth = generate_logical_plan_string(
                self._logical_plan.dag
            )

            if self._snapshot_bundle is not None:
                # This plan has executed some but not all operators.
                schema = unify_block_metadata_schema(self._snapshot_bundle.metadata)
                count = self._snapshot_bundle.num_rows()
            elif self._snapshot_metadata is not None:
                # Metadata-only snapshot from execute_to_iterator().
                schema = self._snapshot_metadata.schema
                count = self._snapshot_metadata.num_rows
            else:
                # This plan hasn't executed any operators.
                sources = self._logical_plan.sources()
                # TODO(@bveeramani): Handle schemas for n-ary operators like `Union`.
                if len(sources) > 1:
                    # Multiple sources, cannot determine schema.
                    schema = None
                    count = None
                else:
                    assert len(sources) == 1
                    # Build a throwaway single-source plan to probe schema/count.
                    plan = ExecutionPlan(DatasetStats(metadata={}, parent=None))
                    plan.link_logical_plan(LogicalPlan(sources[0], plan._context))
                    schema = plan.schema()
                    count = plan.meta_count()
        else:
            # Get schema of output blocks.
            schema = self.schema(fetch_if_missing=False)
            count = self._snapshot_bundle.num_rows()

        if schema is None:
            schema_str = "Unknown schema"
        elif isinstance(schema, type):
            schema_str = str(schema)
        else:
            # Render "{name: type, ...}" from the schema's parallel lists.
            schema_str = []
            for n, t in zip(schema.names, schema.types):
                if hasattr(t, "__name__"):
                    t = t.__name__
                schema_str.append(f"{n}: {t}")
            schema_str = ", ".join(schema_str)
            schema_str = "{" + schema_str + "}"

        if count is None:
            count = "?"

        num_blocks = None
        if dataset_cls == MaterializedDataset:
            num_blocks = self.initial_num_blocks()
            assert num_blocks is not None

        name_str = (
            "name={}, ".format(self._dataset_name)
            if self._dataset_name is not None
            else ""
        )
        num_blocks_str = f"num_blocks={num_blocks}, " if num_blocks else ""

        dataset_str = "{}({}{}num_rows={}, schema={})".format(
            dataset_cls.__name__,
            name_str,
            num_blocks_str,
            count,
            schema_str,
        )

        # If the resulting string representation fits in one line, use it directly.
        SCHEMA_LINE_CHAR_LIMIT = 80
        MIN_FIELD_LENGTH = 10
        INDENT_STR = " " * 3
        trailing_space = INDENT_STR * plan_max_depth

        if len(dataset_str) > SCHEMA_LINE_CHAR_LIMIT:
            # If the resulting string representation exceeds the line char limit,
            # first try breaking up each `Dataset` parameter into its own line
            # and check if each line fits within the line limit. We check the
            # `schema` param's length, since this is likely the longest string.
            schema_str_on_new_line = f"{trailing_space}{INDENT_STR}schema={schema_str}"
            if len(schema_str_on_new_line) > SCHEMA_LINE_CHAR_LIMIT:
                # If the schema cannot fit on a single line, break up each field
                # into its own line.
                schema_str = []
                for n, t in zip(schema.names, schema.types):
                    if hasattr(t, "__name__"):
                        t = t.__name__
                    col_str = f"{trailing_space}{INDENT_STR * 2}{n}: {t}"
                    # If the field line exceeds the char limit, abbreviate
                    # the field name to fit while maintaining the full type
                    if len(col_str) > SCHEMA_LINE_CHAR_LIMIT:
                        shortened_suffix = f"...: {str(t)}"
                        # Show at least 10 characters of the field name, even if
                        # we have already hit the line limit with the type.
                        chars_left_for_col_name = max(
                            SCHEMA_LINE_CHAR_LIMIT - len(shortened_suffix),
                            MIN_FIELD_LENGTH,
                        )
                        col_str = (
                            f"{col_str[:chars_left_for_col_name]}{shortened_suffix}"
                        )
                    schema_str.append(col_str)
                schema_str = ",\n".join(schema_str)
                schema_str = (
                    "{\n" + schema_str + f"\n{trailing_space}{INDENT_STR}" + "}"
                )
            name_str = (
                f"\n{trailing_space}{INDENT_STR}name={self._dataset_name},"
                if self._dataset_name is not None
                else ""
            )
            num_blocks_str = (
                f"\n{trailing_space}{INDENT_STR}num_blocks={num_blocks},"
                if num_blocks
                else ""
            )
            dataset_str = (
                f"{dataset_cls.__name__}("
                f"{name_str}"
                f"{num_blocks_str}"
                f"\n{trailing_space}{INDENT_STR}num_rows={count},"
                f"\n{trailing_space}{INDENT_STR}schema={schema_str}"
                f"\n{trailing_space})"
            )

        if plan_max_depth == 0:
            plan_str += dataset_str
        else:
            plan_str += f"{INDENT_STR * (plan_max_depth - 1)}+- {dataset_str}"
        return plan_str

    def link_logical_plan(self, logical_plan: "LogicalPlan"):
        """Link the logical plan into this execution plan.

        This is used for triggering execution for optimizer code path in this legacy
        execution plan.
        """
        self._logical_plan = logical_plan
        self._logical_plan._context = self._context

    def copy(self) -> "ExecutionPlan":
        """Create a shallow copy of this execution plan.

        This copy can be executed without mutating the original, but clearing the copy
        will also clear the original.

        Returns:
            A shallow copy of this execution plan.
+ """ + plan_copy = ExecutionPlan( + self._in_stats, + data_context=self._context, + ) + if self._snapshot_bundle is not None: + # Copy over the existing snapshot. + plan_copy._snapshot_bundle = self._snapshot_bundle + plan_copy._snapshot_operator = self._snapshot_operator + plan_copy._snapshot_stats = self._snapshot_stats + plan_copy._dataset_name = self._dataset_name + return plan_copy + + def deep_copy(self) -> "ExecutionPlan": + """Create a deep copy of this execution plan. + + This copy can be executed AND cleared without mutating the original. + + Returns: + A deep copy of this execution plan. + """ + plan_copy = ExecutionPlan(copy.copy(self._in_stats)) + if self._snapshot_bundle: + # Copy over the existing snapshot. + plan_copy._snapshot_bundle = copy.copy(self._snapshot_bundle) + plan_copy._snapshot_operator = copy.copy(self._snapshot_operator) + plan_copy._snapshot_stats = copy.copy(self._snapshot_stats) + plan_copy._dataset_name = self._dataset_name + return plan_copy + + def initial_num_blocks(self) -> Optional[int]: + """Get the estimated number of blocks from the logical plan + after applying execution plan optimizations, but prior to + fully executing the dataset.""" + return self._logical_plan.dag.estimated_num_outputs() + + def schema( + self, fetch_if_missing: bool = False + ) -> Union[type, "pyarrow.lib.Schema"]: + """Get the schema after applying all execution plan optimizations, + but prior to fully executing the dataset + (unless `fetch_if_missing` is set to True). + + Args: + fetch_if_missing: Whether to execute the plan to fetch the schema. + + Returns: + The schema of the output dataset. 
+ """ + if self._schema is not None: + return self._schema + + schema = None + if self.has_computed_output(): + schema = unify_block_metadata_schema(self._snapshot_bundle.metadata) + elif self._logical_plan.dag.aggregate_output_metadata().schema is not None: + schema = self._logical_plan.dag.aggregate_output_metadata().schema + elif fetch_if_missing: + iter_ref_bundles, _, _ = self.execute_to_iterator() + for ref_bundle in iter_ref_bundles: + for metadata in ref_bundle.metadata: + if metadata.schema is not None and ( + metadata.num_rows is None or metadata.num_rows > 0 + ): + schema = metadata.schema + break + elif self.is_read_only(): + # For consistency with the previous implementation, we fetch the schema if + # the plan is read-only even if `fetch_if_missing` is False. + iter_ref_bundles, _, _ = self.execute_to_iterator() + try: + ref_bundle = next(iter(iter_ref_bundles)) + for metadata in ref_bundle.metadata: + if metadata.schema is not None: + schema = metadata.schema + break + except StopIteration: # Empty dataset. + schema = None + + self._schema = schema + return self._schema + + def cache_schema(self, schema: Union[type, "pyarrow.lib.Schema"]): + self._schema = schema + + def input_files(self) -> Optional[List[str]]: + """Get the input files of the dataset, if available.""" + return self._logical_plan.dag.aggregate_output_metadata().input_files + + def meta_count(self) -> Optional[int]: + """Get the number of rows after applying all plan optimizations, if possible. + + This method will never trigger any computation. + + Returns: + The number of records of the result Dataset, or None. 
+ """ + if self.has_computed_output(): + num_rows = sum(m.num_rows for m in self._snapshot_bundle.metadata) + elif self._logical_plan.dag.aggregate_output_metadata().num_rows is not None: + num_rows = self._logical_plan.dag.aggregate_output_metadata().num_rows + else: + num_rows = None + return num_rows + + @omit_traceback_stdout + def execute_to_iterator( + self, + ) -> Tuple[Iterator[RefBundle], DatasetStats, Optional["Executor"]]: + """Execute this plan, returning an iterator. + + This will use streaming execution to generate outputs. + + Returns: + Tuple of iterator over output RefBundles, DatasetStats, and the executor. + """ + self._has_started_execution = True + + # Always used the saved context for execution. + ctx = self._context + + if self.has_computed_output(): + bundle = self.execute() + return iter([bundle]), self._snapshot_stats, None + + from ray.data._internal.execution.legacy_compat import ( + execute_to_legacy_bundle_iterator, + ) + from ray.data._internal.execution.streaming_executor import StreamingExecutor + + metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid) + executor = StreamingExecutor(copy.deepcopy(ctx.execution_options), metrics_tag) + bundle_iter = execute_to_legacy_bundle_iterator(executor, self) + # Since the generator doesn't run any code until we try to fetch the first + # value, force execution of one bundle before we call get_stats(). + gen = iter(bundle_iter) + try: + bundle_iter = itertools.chain([next(gen)], gen) + except StopIteration: + pass + self._snapshot_stats = executor.get_stats() + return bundle_iter, self._snapshot_stats, executor + + @omit_traceback_stdout + def execute( + self, + preserve_order: bool = False, + ) -> RefBundle: + """Execute this plan. + + Args: + preserve_order: Whether to preserve order in execution. + + Returns: + The blocks of the output dataset. + """ + self._has_started_execution = True + + # Always used the saved context for execution. 
        context = self._context

        # Fail-fast diagnostics: with zero CPUs available the job would stall
        # waiting for resources, so warn (once) up front.
        if not ray.available_resources().get("CPU"):
            if log_once("cpu_warning"):
                logger.warning(
                    "Warning: The Ray cluster currently does not have "
                    "any available CPUs. The Dataset job will hang unless more CPUs "
                    "are freed up. A common reason is that cluster resources are "
                    "used by Actors or Tune trials; see the following link "
                    "for more details: "
                    "https://docs.ray.io/en/latest/data/data-internals.html#ray-data-and-tune"  # noqa: E501
                )
        if not self.has_computed_output():
            from ray.data._internal.execution.legacy_compat import (
                _get_initial_stats_from_plan,
                execute_to_legacy_block_list,
            )

            if self._logical_plan.dag.output_data() is not None:
                # If the data is already materialized (e.g., `from_pandas`), we can
                # skip execution and directly return the output data. This avoids
                # recording unnecessary metrics for an empty plan execution.
                stats = _get_initial_stats_from_plan(self)

                # TODO(@bveeramani): Make `ExecutionPlan.execute()` return
                # `List[RefBundle]` instead of `RefBundle`. Among other reasons, it'd
                # allow us to remove the unwrapping logic below.
                output_bundles = self._logical_plan.dag.output_data()
                # Flatten all pre-materialized bundles into a single RefBundle;
                # the merged bundle owns its blocks only if every input did.
                owns_blocks = all(bundle.owns_blocks for bundle in output_bundles)
                bundle = RefBundle(
                    [
                        (block, metadata)
                        for bundle in output_bundles
                        for block, metadata in bundle.blocks
                    ],
                    owns_blocks=owns_blocks,
                )
            else:
                from ray.data._internal.execution.streaming_executor import (
                    StreamingExecutor,
                )

                metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid)
                # Deep-copy execution options so the executor cannot mutate the
                # shared context object.
                executor = StreamingExecutor(
                    copy.deepcopy(context.execution_options),
                    metrics_tag,
                )
                blocks = execute_to_legacy_block_list(
                    executor,
                    self,
                    dataset_uuid=self._dataset_uuid,
                    preserve_order=preserve_order,
                )
                bundle = RefBundle(
                    tuple(blocks.iter_blocks_with_metadata()),
                    owns_blocks=blocks._owned_by_consumer,
                )
                stats = executor.get_stats()
                stats_summary_string = stats.to_summary().to_string(
                    include_parent=False
                )
                if context.enable_auto_log_stats:
                    logger.info(stats_summary_string)

                # Retrieve memory-related stats from ray.
                # Best-effort: GCS queries can fail (e.g. during shutdown), so
                # errors are only logged at debug level.
                try:
                    reply = get_memory_info_reply(
                        get_state_from_address(ray.get_runtime_context().gcs_address)
                    )
                    if reply.store_stats.spill_time_total_s > 0:
                        stats.global_bytes_spilled = int(
                            reply.store_stats.spilled_bytes_total
                        )
                    if reply.store_stats.restore_time_total_s > 0:
                        stats.global_bytes_restored = int(
                            reply.store_stats.restored_bytes_total
                        )
                except Exception as e:
                    logger.debug(
                        "Skipping recording memory spilled and restored statistics due to "
                        f"exception: {e}"
                    )

                stats.dataset_bytes_spilled = 0

                def collect_stats(cur_stats):
                    # Accumulate per-operator spill counters up the stats chain.
                    stats.dataset_bytes_spilled += cur_stats.extra_metrics.get(
                        "obj_store_mem_spilled", 0
                    )
                    for parent in cur_stats.parents:
                        collect_stats(parent)

                collect_stats(stats)

            # Set the snapshot to the output of the final operator.
            self._snapshot_bundle = bundle
            self._snapshot_operator = self._logical_plan.dag
            self._snapshot_stats = stats
            self._snapshot_stats.dataset_uuid = self._dataset_uuid

        return self._snapshot_bundle

    @property
    def has_started_execution(self) -> bool:
        """Return ``True`` if this plan has been partially or fully executed."""
        return self._has_started_execution

    def clear_snapshot(self) -> None:
        """Clear the snapshot kept in the plan to the beginning state."""
        self._snapshot_bundle = None
        self._snapshot_operator = None
        self._snapshot_stats = None

    def stats(self) -> DatasetStats:
        """Return stats for this plan.

        If the plan isn't executed, an empty stats object will be returned.
        """
        if not self._snapshot_stats:
            return DatasetStats(metadata={}, parent=None)
        return self._snapshot_stats

    def has_lazy_input(self) -> bool:
        """Return whether this plan has lazy input blocks."""
        # True only when every source operator in the logical plan is a Read.
        return all(isinstance(op, Read) for op in self._logical_plan.sources())

    def is_read_only(self, root_op: Optional[LogicalOperator] = None) -> bool:
        """Return whether the LogicalPlan corresponding to `root_op`
        contains only a Read op. By default, the last operator of
        the LogicalPlan is used."""
        if root_op is None:
            root_op = self._logical_plan.dag
        # A bare Read has no upstream dependencies; any dependency means more
        # than just a read is happening.
        return isinstance(root_op, Read) and len(root_op.input_dependencies) == 0
+ """ + return ( + self._snapshot_bundle is not None + and self._snapshot_operator == self._logical_plan.dag + ) + + def require_preserve_order(self) -> bool: + """Whether this plan requires to preserve order.""" + from ray.data._internal.logical.operators.all_to_all_operator import Sort + from ray.data._internal.logical.operators.n_ary_operator import Zip + + for op in self._logical_plan.dag.post_order_iter(): + if isinstance(op, (Zip, Sort)): + return True + return False diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/row.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/row.py new file mode 100644 index 0000000000000000000000000000000000000000..a94edc1076412307061d02ee0a50a5bd7f059b6a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/row.py @@ -0,0 +1,42 @@ +from collections.abc import Mapping +from typing import Any + + +class TableRow(Mapping): + """ + A dict-like row of a tabular ``Dataset``. + + This implements the dictionary mapping interface, but provides more + efficient access with less data copying than converting Arrow Tables + or Pandas DataFrames into per-row dicts. This class must be subclassed, + with subclasses implementing ``__getitem__``, ``__iter__``, and ``__len__``. + + Concrete subclasses include ``ray.data._internal.arrow_block.ArrowRow`` and + ``ray.data._internal.pandas_block.PandasRow``. + """ + + def __init__(self, row: Any): + """ + Construct a ``TableRow`` (internal API). + + Args: + row: The tabular row that backs this row mapping. + """ + self._row = row + + def as_pydict(self) -> dict: + """ + Convert to a normal Python dict. 
    def as_pydict(self) -> dict:
        """Convert to a normal Python dict.

        This will create a new copy of the row.
        """
        return dict(self.items())

    def __str__(self):
        # Render via the plain-dict form so rows print like normal dicts.
        return str(self.as_pydict())

    def __repr__(self):
        return str(self)

    def _repr_pretty_(self, p, cycle):
        """Pretty-print hook used by IPython/Jupyter display machinery."""
        # NOTE(review): `_dict_pprinter_factory` is a private IPython helper;
        # its signature may change across IPython versions — verify on upgrade.
        from IPython.lib.pretty import _dict_pprinter_factory

        pprinter = _dict_pprinter_factory("{", "}")
        return pprinter(self, p, cycle)
+ num_rows = ray.get(get_num_rows.remote(block)) + metadata.num_rows = num_rows + else: + num_rows = metadata.num_rows + block_rows.append(num_rows) + return block_rows + + +def _generate_valid_indices( + num_rows_per_block: List[int], + split_indices: List[int], +) -> List[int]: + """Generate valid split indices by apply min(index, total_num_rows) + to every index.""" + total_rows = sum(num_rows_per_block) + return [min(index, total_rows) for index in split_indices] + + +def _generate_per_block_split_indices( + num_rows_per_block: List[int], + split_indices: List[int], +) -> List[List[int]]: + """Given num rows per block and valid split indices, generate per block split indices. + + Args: + num_rows_per_block: num of rows per block. + split_indices: The (global) indices at which to split the blocks. + Returns: + Per block split indices indicates each input block's split point(s). + """ + # for each split index, we iterate though the currnet input block + # to see if the index falls into this block. if the index + # falls into this block, we push it back to the current block's + # split indices. Otherwise, we move on to the next block. + per_block_split_indices = [] + current_input_block_id = 0 + current_block_split_indices = [] + current_block_global_offset = 0 + current_index_id = 0 + + while current_index_id < len(split_indices): + split_index = split_indices[current_index_id] + current_block_row = num_rows_per_block[current_input_block_id] + if split_index - current_block_global_offset <= current_block_row: + current_block_split_indices.append( + split_index - current_block_global_offset + ) + current_index_id += 1 + continue + per_block_split_indices.append(current_block_split_indices) + current_block_split_indices = [] + current_block_global_offset += num_rows_per_block[current_input_block_id] + current_input_block_id += 1 + + # we might finished all the indices but there are still blocks left, also + # current_block_split_indices might not be added yet. 
def _split_single_block(
    block_id: int,
    block: Block,
    meta: BlockMetadata,
    split_indices: List[int],
) -> Tuple[Union[Tuple[int, List[BlockMetadata]], Block], ...]:
    """Split the provided block at the given indices.

    Args:
        block_id: the id of this block in the block list.
        block: block to be split.
        meta: metadata of the block; ``meta.num_rows`` must be valid.
        split_indices: the indices where the block should be split.
    Returns:
        block_id, the metadata of the split blocks, and the split blocks
        themselves, in the form ``(block_id, split_blocks_meta), block0,
        block1, ...``. Blocks are returned this way so that the caller
        (driver) rather than the worker can own them.
    """
    split_meta = []
    split_blocks = []
    block_accessor = BlockAccessor.for_block(block)
    prev_index = 0
    # Add a terminal boundary at the end of the block so the loop below also
    # emits the final slice. Build a new list rather than appending to the
    # caller-provided `split_indices` (the original mutated its argument).
    boundaries = split_indices + [meta.num_rows]
    for index in boundaries:
        logger.debug(f"slicing block {prev_index}:{index}")
        stats = BlockExecStats.builder()
        split_block = block_accessor.slice(prev_index, index)
        accessor = BlockAccessor.for_block(split_block)
        _meta = BlockMetadata(
            num_rows=accessor.num_rows(),
            size_bytes=accessor.size_bytes(),
            schema=meta.schema,
            input_files=meta.input_files,
            exec_stats=stats.build(),
        )
        split_meta.append(_meta)
        split_blocks.append(split_block)
        prev_index = index
    # First element carries the id + all slice metadata; the slices follow.
    results = [(block_id, split_meta)]
    results.extend(split_blocks)
    return tuple(results)
This could happen when there + are duplicated indices, or index equal to 0 (start of the block) or num_block_rows + (end of the block). + """ + prev_index = -1 + optimized_indices = [] + for index in block_split_indices: + if index == 0 or index == num_rows: + continue + if index == prev_index: + continue + optimized_indices.append(index) + prev_index = index + return optimized_indices + + +def _split_all_blocks( + blocks_with_metadata: List[Tuple[ObjectRef[Block], BlockMetadata]], + per_block_split_indices: List[List[int]], + owned_by_consumer: bool, +) -> Iterable[Tuple[ObjectRef[Block], BlockMetadata]]: + """Split all the input blocks based on the split indices""" + split_single_block = cached_remote_fn(_split_single_block) + + all_blocks_split_results: List[BlockPartition] = [None] * len(blocks_with_metadata) + + per_block_split_metadata_futures = [] + per_block_split_block_refs = [] + + # tracking splitted blocks for gc. + blocks_splitted = [] + for block_id, block_split_indices in enumerate(per_block_split_indices): + (block_ref, meta) = blocks_with_metadata[block_id] + block_row = meta.num_rows + block_split_indices = _drop_empty_block_split(block_split_indices, block_row) + if len(block_split_indices) == 0: + # optimization: if no split is needed, we just need to add it to the + # result + all_blocks_split_results[block_id] = [(block_ref, meta)] + else: + # otherwise call split remote function. + object_refs = split_single_block.options( + scheduling_strategy="SPREAD", num_returns=2 + len(block_split_indices) + ).remote( + block_id, + block_ref, + meta, + block_split_indices, + ) + per_block_split_metadata_futures.append(object_refs[0]) + per_block_split_block_refs.append(object_refs[1:]) + + blocks_splitted.append(block_ref) + + if per_block_split_metadata_futures: + # only get metadata. 
def _generate_global_split_results(
    all_blocks_split_results: Iterable[Tuple[ObjectRef[Block], BlockMetadata]],
    global_split_sizes: List[int],
) -> Tuple[List[List[ObjectRef[Block]]], List[List[BlockMetadata]]]:
    """Reassemble the per-block split results into the final global splits.

    Consumes `all_blocks_split_results` in order, grouping consecutive block
    slices until each group's row count matches the corresponding entry of
    `global_split_sizes`.
    """
    result_blocks: List[List[ObjectRef[Block]]] = []
    result_metas: List[List[BlockMetadata]] = []

    for target_size in global_split_sizes:
        split_blocks = []
        split_meta = []
        accumulated = 0
        while accumulated < target_size:
            block_ref, meta = next(all_blocks_split_results)
            split_blocks.append(block_ref)
            split_meta.append(meta)
            accumulated += meta.num_rows
        # Upstream splitting guarantees slice sizes line up exactly with the
        # requested split sizes.
        assert accumulated == target_size
        result_blocks.append(split_blocks)
        result_metas.append(split_meta)

    return result_blocks, result_metas
def _split_at_indices(
    blocks_with_metadata: List[Tuple[ObjectRef[Block], BlockMetadata]],
    indices: List[int],
    owned_by_consumer: bool = True,
    block_rows: List[int] = None,
) -> Tuple[List[List[ObjectRef[Block]]], List[List[BlockMetadata]]]:
    """Split blocks at the provided indices.

    Args:
        blocks_with_metadata: Block futures to split, including the associated metadata.
        indices: The (global) indices at which to split the blocks.
        owned_by_consumer: Whether the provided blocks are owned by the consumer.
        block_rows: The number of rows for each block, in case it has already been
            computed. NOTE(review): should be annotated Optional[List[int]].

    Returns:
        The block split futures and their metadata. If an index split is empty, the
        corresponding block split will be empty.
    """

    # We implement the split in 3 phases.
    # phase 1: calculate the per block split indices.
    blocks_with_metadata = list(blocks_with_metadata)
    if len(blocks_with_metadata) == 0:
        # No input: N indices still define N + 1 (all empty) output splits.
        return ([[]] * (len(indices) + 1), [[]] * (len(indices) + 1))
    if block_rows is None:
        block_rows = _calculate_blocks_rows(blocks_with_metadata)
    valid_indices = _generate_valid_indices(block_rows, indices)
    per_block_split_indices: List[List[int]] = _generate_per_block_split_indices(
        block_rows, valid_indices
    )

    # phase 2: split each block based on the indices from previous step.
    all_blocks_split_results: Iterable[
        Tuple[ObjectRef[Block], BlockMetadata]
    ] = _split_all_blocks(
        blocks_with_metadata, per_block_split_indices, owned_by_consumer
    )

    # phase 3: generate the final split.

    # first calculate the size for each split.
    # Pairwise differences of [0, *indices, total_rows] give the row count of
    # every output split.
    helper = [0] + valid_indices + [sum(block_rows)]
    split_sizes = [helper[i] - helper[i - 1] for i in range(1, len(helper))]

    return _generate_global_split_results(all_blocks_split_results, split_sizes)
def fmt(seconds: float) -> str:
    """Format a duration given in seconds as a short human-readable string.

    Durations above one second render in seconds, above one millisecond in
    milliseconds, and everything else in microseconds.
    """
    if seconds > 1:
        value, unit = round(seconds, 2), "s"
    elif seconds > 0.001:
        value, unit = round(seconds * 1000, 2), "ms"
    else:
        value, unit = round(seconds * 1000 * 1000, 2), "us"
    return f"{value}{unit}"
class Timer:
    """Accumulates elapsed time (in seconds) across repeated measurements.

    Tracks the running total plus the min, max, and mean of the individual
    recorded values.
    """

    def __init__(self):
        self._value: float = 0
        self._min: float = float("inf")
        self._max: float = 0
        self._total_count: float = 0

    @contextmanager
    def timer(self) -> None:
        """Context manager that records the elapsed wall time of its body."""
        start = time.perf_counter()
        try:
            yield
        finally:
            # Record even when the body raises, so no time goes unaccounted.
            self.add(time.perf_counter() - start)

    def add(self, value: float) -> None:
        """Record one measurement of `value` seconds."""
        self._value += value
        self._min = min(self._min, value)
        self._max = max(self._max, value)
        self._total_count += 1

    def get(self) -> float:
        """Total accumulated seconds."""
        return self._value

    def min(self) -> float:
        """Smallest recorded value (inf if nothing was recorded)."""
        return self._min

    def max(self) -> float:
        """Largest recorded value (0 if nothing was recorded)."""
        return self._max

    def avg(self) -> float:
        """Mean recorded value (inf if nothing was recorded)."""
        if not self._total_count:
            return float("inf")
        return self._value / self._total_count
    def __init__(
        self,
        operator_name: str,
        parent: "DatasetStats",
        override_start_time: Optional[float],
    ):
        self.operator_name = operator_name
        self.parent = parent
        # Use the caller-supplied start time when given, so time spent before
        # the builder was created can be attributed to the operator.
        # NOTE(review): a falsy override (0.0) is silently ignored by `or` —
        # presumably never passed in practice; confirm with callers.
        self.start_time = override_start_time or time.perf_counter()

    def build_multioperator(self, metadata: StatsDict) -> "DatasetStats":
        """Build stats for a fused operator chain (e.g. ``"ReadCSV->Map"``).

        With multiple metadata entries, the first is keyed by the full chained
        operator name and subsequent entries by only the last component of the
        chain; a single entry keeps the plain operator name.
        """
        op_metadata = {}
        for i, (k, v) in enumerate(metadata.items()):
            capped_k = capfirst(k)
            if len(metadata) > 1:
                if i == 0:
                    op_metadata[self.operator_name + capped_k] = v
                else:
                    op_metadata[self.operator_name.split("->")[-1] + capped_k] = v
            else:
                op_metadata[self.operator_name] = v
        stats = DatasetStats(
            metadata=op_metadata,
            parent=self.parent,
            base_name=self.operator_name,
        )
        # Wall-clock time since this builder was created (or the override).
        stats.time_total_s = time.perf_counter() - self.start_time
        return stats

    def build(self, final_blocks: BlockList) -> "DatasetStats":
        """Build stats keyed by the operator name from the final block list."""
        stats = DatasetStats(
            metadata={self.operator_name: final_blocks.get_metadata()},
            parent=self.parent,
        )
        stats.time_total_s = time.perf_counter() - self.start_time
        return stats
        self.next_dataset_id = 0
        # Dataset metadata to be queried directly by DashboardHead api.
        self.datasets: Dict[str, Any] = {}

        # Ray Data dashboard metrics
        # Everything is a gauge because we need to reset all of
        # a dataset's metrics to 0 after each finishes execution.
        op_tags_keys = ("dataset", "operator")

        # TODO(scottjlee): move these overview metrics as fields in a
        # separate dataclass, similar to OpRuntimeMetrics.
        self.spilled_bytes = Gauge(
            "data_spilled_bytes",
            description="""Bytes spilled by dataset operators.
            DataContext.enable_get_object_locations_for_metrics
            must be set to True to report this metric""",
            tag_keys=op_tags_keys,
        )
        self.allocated_bytes = Gauge(
            "data_allocated_bytes",
            description="Bytes allocated by dataset operators",
            tag_keys=op_tags_keys,
        )
        self.freed_bytes = Gauge(
            "data_freed_bytes",
            description="Bytes freed by dataset operators",
            tag_keys=op_tags_keys,
        )
        self.current_bytes = Gauge(
            "data_current_bytes",
            description="Bytes currently in memory store used by dataset operators",
            tag_keys=op_tags_keys,
        )
        self.cpu_usage_cores = Gauge(
            "data_cpu_usage_cores",
            description="CPUs allocated to dataset operators",
            tag_keys=op_tags_keys,
        )
        self.gpu_usage_cores = Gauge(
            "data_gpu_usage_cores",
            description="GPUs allocated to dataset operators",
            tag_keys=op_tags_keys,
        )
        self.output_bytes = Gauge(
            "data_output_bytes",
            description="Bytes outputted by dataset operators",
            tag_keys=op_tags_keys,
        )
        self.output_rows = Gauge(
            "data_output_rows",
            description="Rows outputted by dataset operators",
            tag_keys=op_tags_keys,
        )

        # === Metrics from OpRuntimeMetrics ===
        # One Gauge per OpRuntimeMetrics field, bucketed by metrics group.
        # Inputs-related metrics
        self.execution_metrics_inputs = (
            self._create_prometheus_metrics_for_execution_metrics(
                metrics_group=MetricsGroup.INPUTS,
                tag_keys=op_tags_keys,
            )
        )

        # Outputs-related metrics
        self.execution_metrics_outputs = (
            self._create_prometheus_metrics_for_execution_metrics(
                metrics_group=MetricsGroup.OUTPUTS,
                tag_keys=op_tags_keys,
            )
        )

        # Task-related metrics
        self.execution_metrics_tasks = (
            self._create_prometheus_metrics_for_execution_metrics(
                metrics_group=MetricsGroup.TASKS,
                tag_keys=op_tags_keys,
            )
        )

        # Object store memory-related metrics
        self.execution_metrics_obj_store_memory = (
            self._create_prometheus_metrics_for_execution_metrics(
                metrics_group=MetricsGroup.OBJECT_STORE_MEMORY,
                tag_keys=op_tags_keys,
            )
        )

        # Miscellaneous metrics
        self.execution_metrics_misc = (
            self._create_prometheus_metrics_for_execution_metrics(
                metrics_group=MetricsGroup.MISC,
                tag_keys=op_tags_keys,
            )
        )

        # Iteration metrics are tagged per dataset only (no operator tag).
        iter_tag_keys = ("dataset",)
        self.iter_total_blocked_s = Gauge(
            "data_iter_total_blocked_seconds",
            description="Seconds user thread is blocked by iter_batches()",
            tag_keys=iter_tag_keys,
        )
        self.iter_user_s = Gauge(
            "data_iter_user_seconds",
            description="Seconds spent in user code",
            tag_keys=iter_tag_keys,
        )
        self.iter_initialize_s = Gauge(
            "data_iter_initialize_seconds",
            description="Seconds spent in iterator initialization code",
            tag_keys=iter_tag_keys,
        )

    def _create_prometheus_metrics_for_execution_metrics(
        self, metrics_group: MetricsGroup, tag_keys: Tuple[str, ...]
    ) -> Dict[str, Gauge]:
        """Create one Gauge per OpRuntimeMetrics field in `metrics_group`."""
        metrics = {}
        for metric in OpRuntimeMetrics.get_metrics():
            # NOTE(review): `not a == b` is equivalent to `a != b`; the latter
            # would read more clearly.
            if not metric.metrics_group == metrics_group:
                continue
            metric_name = f"data_{metric.name}"
            metric_description = metric.description
            metrics[metric.name] = Gauge(
                metric_name,
                description=metric_description,
                tag_keys=tag_keys,
            )
        return metrics

    def record_start(self, stats_uuid):
        """Mark the start of stats collection for `stats_uuid`."""
        self.start_time[stats_uuid] = time.perf_counter()
        self.fifo_queue.append(stats_uuid)
        # Purge the oldest stats if the limit is exceeded.
        if len(self.fifo_queue) > self.max_stats:
            # FIFO eviction: drop every record tied to the oldest stats uuid.
            uuid = self.fifo_queue.pop(0)
            if uuid in self.start_time:
                del self.start_time[uuid]
            if uuid in self.last_time:
                del self.last_time[uuid]
            if uuid in self.metadata:
                del self.metadata[uuid]

    def record_task(
        self, stats_uuid: str, task_idx: int, blocks_metadata: List[BlockMetadata]
    ):
        """Record the block metadata produced by one task of a dataset."""
        # Null out the schema to keep the stats size small.
        # TODO(chengsu): ideally schema should be null out on caller side.
        # NOTE(review): this mutates the caller's BlockMetadata objects in place.
        for metadata in blocks_metadata:
            metadata.schema = None
        # Ignore tasks for uuids we never started (or already evicted).
        if stats_uuid in self.start_time:
            self.metadata[stats_uuid][task_idx] = blocks_metadata
            self.last_time[stats_uuid] = time.perf_counter()

    def get(self, stats_uuid):
        """Return (metadata-by-task-index, elapsed seconds) for `stats_uuid`."""
        if stats_uuid not in self.metadata:
            return {}, 0.0
        return (
            self.metadata[stats_uuid],
            self.last_time[stats_uuid] - self.start_time[stats_uuid],
        )

    def _get_stats_dict_size(self):
        # Sizes of the three bookkeeping dicts; useful for tests/diagnostics.
        return len(self.start_time), len(self.last_time), len(self.metadata)

    def get_dataset_id(self):
        """Hand out a unique, monotonically increasing dataset id (as str)."""
        dataset_id = str(self.next_dataset_id)
        self.next_dataset_id += 1
        return dataset_id

    def update_metrics(self, execution_metrics, iteration_metrics):
        """Batch entry point: apply execution and iteration metric updates."""
        for metrics in execution_metrics:
            self.update_execution_metrics(*metrics)
        for metrics in iteration_metrics:
            self.update_iteration_metrics(*metrics)

    def update_execution_metrics(
        self,
        dataset_tag: str,
        op_metrics: List[Dict[str, Union[int, float]]],
        operator_tags: List[str],
        state: Dict[str, Any],
    ):
        """Push per-operator execution metrics to the dashboard gauges.

        `op_metrics` and `operator_tags` are parallel lists: one metrics dict
        per operator tag. Missing fields default to 0.
        """
        for stats, operator_tag in zip(op_metrics, operator_tags):
            tags = self._create_tags(dataset_tag, operator_tag)

            self.spilled_bytes.set(stats.get("obj_store_mem_spilled", 0), tags)
            self.freed_bytes.set(stats.get("obj_store_mem_freed", 0), tags)
            self.current_bytes.set(stats.get("obj_store_mem_used", 0), tags)
            self.output_bytes.set(stats.get("bytes_task_outputs_generated", 0), tags)
            self.output_rows.set(stats.get("rows_task_outputs_generated", 0), tags)
self.cpu_usage_cores.set(stats.get("cpu_usage", 0), tags) + self.gpu_usage_cores.set(stats.get("gpu_usage", 0), tags) + + for field_name, prom_metric in self.execution_metrics_inputs.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_outputs.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_tasks.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for ( + field_name, + prom_metric, + ) in self.execution_metrics_obj_store_memory.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + for field_name, prom_metric in self.execution_metrics_misc.items(): + prom_metric.set(stats.get(field_name, 0), tags) + + # This update is called from a dataset's executor, + # so all tags should contain the same dataset + self.update_dataset(dataset_tag, state) + + def update_iteration_metrics( + self, + stats: "DatasetStats", + dataset_tag, + ): + tags = self._create_tags(dataset_tag) + self.iter_total_blocked_s.set(stats.iter_total_blocked_s.get(), tags) + self.iter_user_s.set(stats.iter_user_s.get(), tags) + self.iter_initialize_s.set(stats.iter_initialize_s.get(), tags) + + def register_dataset(self, job_id: str, dataset_tag: str, operator_tags: List[str]): + self.datasets[dataset_tag] = { + "job_id": job_id, + "state": "RUNNING", + "progress": 0, + "total": 0, + "start_time": time.time(), + "end_time": None, + "operators": { + operator: { + "state": "RUNNING", + "progress": 0, + "total": 0, + } + for operator in operator_tags + }, + } + + def update_dataset(self, dataset_tag, state): + self.datasets[dataset_tag].update(state) + + def get_datasets(self, job_id: Optional[str] = None): + if not job_id: + return self.datasets + return {k: v for k, v in self.datasets.items() if v["job_id"] == job_id} + + def _create_tags(self, dataset_tag: str, operator_tag: Optional[str] = None): + tags = {"dataset": dataset_tag} + if operator_tag is 
not None: + tags["operator"] = operator_tag + return tags + + +# Creating/getting an actor from multiple threads is not safe. +# https://github.com/ray-project/ray/issues/41324 +_stats_actor_lock: threading.RLock = threading.RLock() + + +def _get_or_create_stats_actor(): + ctx = DataContext.get_current() + scheduling_strategy = ctx.scheduling_strategy + if not ray.util.client.ray.is_connected(): + # Pin the stats actor to the local node + # so it fate-shares with the driver. + scheduling_strategy = NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ) + with _stats_actor_lock: + return _StatsActor.options( + name=STATS_ACTOR_NAME, + namespace=STATS_ACTOR_NAMESPACE, + get_if_exists=True, + lifetime="detached", + scheduling_strategy=scheduling_strategy, + ).remote() + + +class _StatsManager: + """A Class containing util functions that manage remote calls to _StatsActor. + + This class collects stats from execution and iteration codepaths and keeps + track of the latest snapshot. + + An instance of this class runs a single background thread that periodically + forwards the latest execution/iteration stats to the _StatsActor. + + This thread will terminate itself after being inactive (meaning that there are + no active executors or iterators) for STATS_ACTOR_UPDATE_THREAD_INACTIVITY_LIMIT + iterations. After terminating, a new thread will start if more calls are made + to this class. + """ + + # Interval for making remote calls to the _StatsActor. + STATS_ACTOR_UPDATE_INTERVAL_SECONDS = 5 + + # After this many iterations of inactivity, + # _StatsManager._update_thread will close itself. + UPDATE_THREAD_INACTIVITY_LIMIT = 5 + + def __init__(self): + # Lazily get stats actor handle to avoid circular import. 
+ self._stats_actor_handle: Optional[ActorHandle] = None + self._stats_actor_cluster_id = None + + # Last execution stats snapshots for all executing datasets + self._last_execution_stats = {} + # Last iteration stats snapshots for all running iterators + self._last_iteration_stats: Dict[ + str, Tuple[Dict[str, str], "DatasetStats"] + ] = {} + # Lock for updating stats snapshots + self._stats_lock: threading.Lock = threading.Lock() + + # Background thread to make remote calls to _StatsActor + self._update_thread: Optional[threading.Thread] = None + self._update_thread_lock: threading.Lock = threading.Lock() + + def _stats_actor(self, create_if_not_exists=True) -> Optional[ActorHandle]: + if ray._private.worker._global_node is None: + raise RuntimeError("Global node is not initialized.") + current_cluster_id = ray._private.worker._global_node.cluster_id + if ( + self._stats_actor_handle is None + or self._stats_actor_cluster_id != current_cluster_id + ): + if create_if_not_exists: + self._stats_actor_handle = _get_or_create_stats_actor() + else: + try: + self._stats_actor_handle = ray.get_actor( + name=STATS_ACTOR_NAME, namespace=STATS_ACTOR_NAMESPACE + ) + except ValueError: + return None + self._stats_actor_cluster_id = current_cluster_id + return self._stats_actor_handle + + def _start_thread_if_not_running(self): + # Start background update thread if not running. + with self._update_thread_lock: + if self._update_thread is None or not self._update_thread.is_alive(): + + def _run_update_loop(): + iter_stats_inactivity = 0 + while True: + if self._last_iteration_stats or self._last_execution_stats: + try: + # Do not create _StatsActor if it doesn't exist because + # this thread can be running even after the cluster is + # shutdown. Creating an actor will automatically start + # a new cluster. 
+ stats_actor = self._stats_actor( + create_if_not_exists=False + ) + if stats_actor is None: + continue + stats_actor.update_metrics.remote( + execution_metrics=list( + self._last_execution_stats.values() + ), + iteration_metrics=list( + self._last_iteration_stats.values() + ), + ) + iter_stats_inactivity = 0 + except Exception: + logger.debug( + "Error occurred during remote call to _StatsActor.", + exc_info=True, + ) + return + else: + iter_stats_inactivity += 1 + if ( + iter_stats_inactivity + >= _StatsManager.UPDATE_THREAD_INACTIVITY_LIMIT + ): + logger.debug( + "Terminating StatsManager thread due to inactivity." + ) + return + time.sleep(StatsManager.STATS_ACTOR_UPDATE_INTERVAL_SECONDS) + + self._update_thread = threading.Thread( + target=_run_update_loop, daemon=True + ) + self._update_thread.start() + + # Execution methods + + def update_execution_metrics( + self, + dataset_tag: str, + op_metrics: List[OpRuntimeMetrics], + operator_tags: List[str], + state: Dict[str, Any], + force_update: bool = False, + ): + op_metrics_dicts = [metric.as_dict() for metric in op_metrics] + args = (dataset_tag, op_metrics_dicts, operator_tags, state) + if force_update: + self._stats_actor().update_execution_metrics.remote(*args) + else: + with self._stats_lock: + self._last_execution_stats[dataset_tag] = args + self._start_thread_if_not_running() + + def clear_last_execution_stats(self, dataset_tag: str): + # After dataset completes execution, remove cached execution stats. + # Marks the dataset as finished on job page's Ray Data Overview. 
        # (continuation of clear_last_execution_stats(), defined above)
        with self._stats_lock:
            if dataset_tag in self._last_execution_stats:
                del self._last_execution_stats[dataset_tag]

    # Iteration methods

    def update_iteration_metrics(self, stats: "DatasetStats", dataset_tag: str):
        """Cache the latest iteration stats snapshot for this dataset and make
        sure the background update thread is running."""
        with self._stats_lock:
            self._last_iteration_stats[dataset_tag] = (stats, dataset_tag)
        self._start_thread_if_not_running()

    def clear_iteration_metrics(self, dataset_tag: str):
        # Delete the last iteration stats so that update thread will have
        # a chance to terminate.
        # Note we don't reset the actual metric values through the StatsActor
        # since the value is essentially a counter value. See
        # https://github.com/ray-project/ray/pull/48618 for more context.
        with self._stats_lock:
            if dataset_tag in self._last_iteration_stats:
                del self._last_iteration_stats[dataset_tag]

    # Other methods

    def register_dataset_to_stats_actor(self, dataset_tag, operator_tags):
        """Fire-and-forget registration of a dataset run with the stats actor."""
        self._stats_actor().register_dataset.remote(
            ray.get_runtime_context().get_job_id(),
            dataset_tag,
            operator_tags,
        )

    def get_dataset_id_from_stats_actor(self) -> str:
        """Return a unique dataset id, falling back to a local uuid4."""
        try:
            return ray.get(self._stats_actor().get_dataset_id.remote())
        except Exception:
            # Getting dataset id from _StatsActor may fail, in this case
            # fall back to uuid4
            return uuid4().hex


# Module-level singleton through which all stats reporting goes.
StatsManager = _StatsManager()


class DatasetStats:
    """Holds the execution times for a given Dataset.

    This object contains a reference to the parent Dataset's stats as well,
    but not the Dataset object itself, to allow its blocks to be dropped from
    memory."""

    def __init__(
        self,
        *,
        metadata: StatsDict,
        parent: Union[Optional["DatasetStats"], List["DatasetStats"]],
        needs_stats_actor: bool = False,
        stats_uuid: str = None,
        base_name: str = None,
    ):
        """Create dataset stats.

        Args:
            metadata: Dict of operators used to create this Dataset from the
                previous one. Typically one entry, e.g., {"map": [...]}.
            parent: Reference to parent Dataset's stats, or a list of parents
                if there are multiple.
            needs_stats_actor: Whether this Dataset's stats needs a stats actor for
                stats collection. This is currently only used for Datasets using a
                lazy datasource (i.e. a LazyBlockList).
            stats_uuid: The uuid for the stats, used to fetch the right stats
                from the stats actor.
            base_name: The name of the base operation for a multi-operator operation.
        """

        self.metadata: StatsDict = metadata
        # Normalize a single parent to a one-element list.
        if parent is not None and not isinstance(parent, list):
            parent = [parent]
        self.parents: List["DatasetStats"] = parent or []
        # Depth in the stats DAG: one more than the deepest parent.
        self.number: int = (
            0 if not self.parents else max(p.number for p in self.parents) + 1
        )
        self.base_name = base_name
        # TODO(ekl) deprecate and remove the notion of dataset UUID once we move
        # fully to streaming execution.
        self.dataset_uuid: str = "unknown_uuid"
        self.time_total_s: float = 0
        self.needs_stats_actor = needs_stats_actor
        self.stats_uuid = stats_uuid

        # Streaming executor stats
        self.streaming_exec_schedule_s: Timer = Timer()

        # Iteration stats, filled out if the user iterates over the dataset.
        self.iter_wait_s: Timer = Timer()
        self.iter_get_s: Timer = Timer()
        self.iter_next_batch_s: Timer = Timer()
        self.iter_format_batch_s: Timer = Timer()
        self.iter_collate_batch_s: Timer = Timer()
        self.iter_finalize_batch_s: Timer = Timer()
        self.iter_total_blocked_s: Timer = Timer()
        self.iter_user_s: Timer = Timer()
        self.iter_initialize_s: Timer = Timer()
        self.iter_total_s: Timer = Timer()
        self.extra_metrics = {}

        # Block fetch stats during iteration.
        # These are stats about locations of blocks when the iterator is trying to
        # consume them. The iteration performance will be affected depending on
        # whether the block is in the local object store of the node where the
        # iterator is running.
        # This serves as an indicator of block prefetching effectiveness.
        self.iter_blocks_local: int = 0
        self.iter_blocks_remote: int = 0
        self.iter_unknown_location: int = 0

        # Memory usage stats
        self.global_bytes_spilled: int = 0
        self.global_bytes_restored: int = 0
        self.dataset_bytes_spilled: int = 0

        # Streaming split coordinator stats (dataset level)
        self.streaming_split_coordinator_s: Timer = Timer()

    @property
    def stats_actor(self):
        # Handle to the cluster-wide stats actor (created on first access).
        return _get_or_create_stats_actor()

    def child_builder(
        self, name: str, override_start_time: Optional[float] = None
    ) -> _DatasetStatsBuilder:
        """Start recording stats for an op of the given name (e.g., map)."""
        return _DatasetStatsBuilder(name, self, override_start_time)

    def to_summary(self) -> "DatasetStatsSummary":
        """Generate a `DatasetStatsSummary` object from the given `DatasetStats`
        object, which can be used to generate a summary string."""
        if self.needs_stats_actor:
            ac = self.stats_actor
            # TODO(chengsu): this is a super hack, clean it up.
            stats_map, self.time_total_s = ray.get(ac.get.remote(self.stats_uuid))
            # Only populate stats when stats from all read tasks are ready at
            # stats actor.
            if len(stats_map.items()) == len(self.metadata["Read"]):
                self.metadata["Read"] = []
                # Flatten per-task block metadata in task-index order.
                for _, blocks_metadata in sorted(stats_map.items()):
                    self.metadata["Read"] += blocks_metadata

        operators_stats = []
        # More than one metadata entry means this dataset's operators are
        # sub-operators of a fused/multi-operator stage.
        is_sub_operator = len(self.metadata) > 1
        for name, meta in self.metadata.items():
            operators_stats.append(
                OperatorStatsSummary.from_block_metadata(
                    name,
                    meta,
                    is_sub_operator=is_sub_operator,
                )
            )

        iter_stats = IterStatsSummary(
            self.iter_wait_s,
            self.iter_get_s,
            self.iter_next_batch_s,
            self.iter_format_batch_s,
            self.iter_collate_batch_s,
            self.iter_finalize_batch_s,
            self.iter_total_blocked_s,
            self.iter_user_s,
            self.iter_initialize_s,
            self.iter_total_s,
            self.streaming_split_coordinator_s,
            self.iter_blocks_local,
            self.iter_blocks_remote,
            self.iter_unknown_location,
        )
        stats_summary_parents = []
        if self.parents is not None:
            # Recursively summarize ancestors so the summary is a full DAG.
            stats_summary_parents = [p.to_summary() for p in self.parents]
        streaming_exec_schedule_s = (
            self.streaming_exec_schedule_s.get()
            if self.streaming_exec_schedule_s
            else 0
        )
        return DatasetStatsSummary(
            operators_stats,
            iter_stats,
            stats_summary_parents,
            self.number,
            self.dataset_uuid,
            self.time_total_s,
            self.base_name,
            self.extra_metrics,
            self.global_bytes_spilled,
            self.global_bytes_restored,
            self.dataset_bytes_spilled,
            streaming_exec_schedule_s,
        )

    def runtime_metrics(self) -> str:
        """Generate a string representing the runtime metrics of a Dataset. This is
        a high level summary of the time spent in Ray Data code broken down by operator.
        It also includes the time spent in the scheduler.
        Times are shown as the total
        time for each operator and percentages of time are shown as a fraction of the
        total time for the whole dataset."""
        return self.to_summary().runtime_metrics()


@DeveloperAPI
@dataclass
class DatasetStatsSummary:
    # Per-operator (or per-sub-operator) execution summaries for this dataset.
    operators_stats: List["OperatorStatsSummary"]
    # Iteration-time breakdown for this dataset.
    iter_stats: "IterStatsSummary"
    # Summaries of ancestor datasets in the lineage DAG.
    parents: List["DatasetStatsSummary"]
    # Depth in the lineage DAG (see DatasetStats.number).
    number: int
    dataset_uuid: str
    # Overall dataset wall time in seconds.
    time_total_s: float
    base_name: str
    extra_metrics: Dict[str, Any]
    global_bytes_spilled: int
    global_bytes_restored: int
    dataset_bytes_spilled: int
    # Seconds the streaming executor spent scheduling.
    streaming_exec_schedule_s: float

    def to_string(
        self,
        already_printed: Optional[Set[str]] = None,
        include_parent: bool = True,
        add_global_stats=True,
    ) -> str:
        """Return a human-readable summary of this Dataset's stats.

        Args:
            already_printed: Set of operator IDs that have already had its stats printed
                out.
            include_parent: If true, also include parent stats summary; otherwise, only
                log stats of the latest operator.
            add_global_stats: If true, includes global stats to this summary.
        Returns:
            String with summary statistics for executing the Dataset.
        """
        if already_printed is None:
            already_printed = set()

        out = ""
        if self.parents and include_parent:
            # Parents are printed first (without global stats) so output reads
            # in execution order.
            for p in self.parents:
                parent_sum = p.to_string(already_printed, add_global_stats=False)
                if parent_sum:
                    out += parent_sum
                    out += "\n"
        operators_stats_summary = None
        if len(self.operators_stats) == 1:
            operators_stats_summary = self.operators_stats[0]
            operator_name = operators_stats_summary.operator_name
            # Dedup key: the same operator is only printed in full once.
            operator_uuid = self.dataset_uuid + operator_name
            out += "Operator {} {}: ".format(self.number, operator_name)
            if operator_uuid in already_printed:
                out += "[execution cached]\n"
            else:
                already_printed.add(operator_uuid)
                out += str(operators_stats_summary)
        elif len(self.operators_stats) > 1:
            rounded_total = round(self.time_total_s, 2)
            if rounded_total <= 0:
                # Handle -0.0 case.
                rounded_total = 0
            out += "Operator {} {}: executed in {}s\n".format(
                self.number, self.base_name, rounded_total
            )
            for n, operators_stats_summary in enumerate(self.operators_stats):
                operator_name = operators_stats_summary.operator_name
                operator_uuid = self.dataset_uuid + operator_name
                out += "\n"
                out += "\tSuboperator {} {}: ".format(n, operator_name)
                if operator_uuid in already_printed:
                    out += "\t[execution cached]\n"
                else:
                    already_printed.add(operator_uuid)
                    out += str(operators_stats_summary)
        verbose_stats_logs = DataContext.get_current().verbose_stats_logs
        if verbose_stats_logs and self.extra_metrics:
            indent = (
                "\t"
                if operators_stats_summary and operators_stats_summary.is_sub_operator
                else ""
            )
            out += indent
            out += "* Extra metrics: " + str(self.extra_metrics) + "\n"
        out += str(self.iter_stats)

        if len(self.operators_stats) > 0 and add_global_stats:
            mb_spilled = round(self.global_bytes_spilled / 1e6)
            mb_restored = round(self.global_bytes_restored / 1e6)
            if mb_spilled or mb_restored:
                out += "\nCluster memory:\n"
                out += "* Spilled to disk: {}MB\n".format(mb_spilled)
                out += "* Restored from disk: {}MB\n".format(mb_restored)

            dataset_mb_spilled = round(self.dataset_bytes_spilled / 1e6)
            if dataset_mb_spilled:
                out += "\nDataset memory:\n"
                out += "* Spilled to disk: {}MB\n".format(dataset_mb_spilled)

            # For throughput, we compute both an observed Ray Data dataset throughput
            # and an estimated single node dataset throughput.

            # The observed dataset throughput is computed by dividing the total number
            # of rows produced by the total wall time of the dataset (i.e. from start to
            # finish how long did the dataset take to be processed). With the recursive
            # nature of the DatasetStatsSummary, we use get_total_wall_time to determine
            # the total wall time (this finds the difference between the earliest start
            # and latest end for any block in any operator).

            # The estimated single node dataset throughput is computed by dividing the
            # total number of rows produced the sum of the wall times across all blocks
            # of all operators. This assumes that on a single node the work done would
            # be equivalent, with no concurrency.
            output_num_rows = self.operators_stats[-1].output_num_rows
            total_num_out_rows = output_num_rows["sum"] if output_num_rows else 0
            wall_time = self.get_total_wall_time()
            total_time_all_blocks = self.get_total_time_all_blocks()
            if total_num_out_rows and wall_time and total_time_all_blocks:
                out += "\n"
                out += "Dataset throughput:\n"
                out += (
                    "\t* Ray Data throughput:"
                    f" {total_num_out_rows / wall_time} "
                    "rows/s\n"
                )
                out += (
                    "\t* Estimated single node throughput:"
                    f" {total_num_out_rows / total_time_all_blocks} "
                    "rows/s\n"
                )
            if verbose_stats_logs and add_global_stats:
                out += "\n" + self.runtime_metrics()

        return out

    @staticmethod
    def _collect_dataset_stats_summaries(
        curr: "DatasetStatsSummary",
    ) -> List["DatasetStatsSummary"]:
        """Flatten this summary and its ancestors into a list (self last)."""
        summs = []
        # TODO: Do operators ever have multiple parents? Do we need to deduplicate?
        # NOTE(review): parents that themselves have no parents are never
        # appended here — possibly intentional (leaf/input stages carry no
        # operator stats), but worth confirming.
        for p in curr.parents:
            if p and p.parents:
                summs.extend(DatasetStatsSummary._collect_dataset_stats_summaries(p))
        return summs + [curr]

    @staticmethod
    def _find_start_and_end(summ: "DatasetStatsSummary") -> Tuple[float, float]:
        """Return (earliest block start, latest block end) across this
        summary's operators."""
        earliest_start = min(ops.earliest_start_time for ops in summ.operators_stats)
        latest_end = max(ops.latest_end_time for ops in summ.operators_stats)
        return earliest_start, latest_end

    def runtime_metrics(self) -> str:
        """Render per-operator wall times plus scheduling time, each with its
        percentage of the dataset's total wall time."""
        total_wall_time = self.get_total_wall_time()

        def fmt_line(name: str, time: float) -> str:
            # NOTE(review): divides by total_wall_time — relies on it being
            # nonzero whenever this is called; confirm for empty datasets.
            return f"* {name}: {fmt(time)} ({time / total_wall_time * 100:.3f}%)\n"

        summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
        out = "Runtime Metrics:\n"
        for summ in summaries:
            if len(summ.operators_stats) > 0:
                earliest_start, latest_end = DatasetStatsSummary._find_start_and_end(
                    summ
                )
                op_total_time = latest_end - earliest_start
                out += fmt_line(summ.base_name, op_total_time)
        out += fmt_line("Scheduling", self.streaming_exec_schedule_s)
        out += fmt_line("Total", total_wall_time)
        return out

    def __repr__(self, level=0) -> str:
        """Nested, indented debug representation (level controls the indent)."""
        indent = leveled_indent(level)
        operators_stats = "\n".join(
            [ss.__repr__(level + 2) for ss in self.operators_stats]
        )
        parent_stats = "\n".join([ps.__repr__(level + 2) for ps in self.parents])
        extra_metrics = "\n".join(
            f"{leveled_indent(level + 2)}{k}: {v},"
            for k, v in self.extra_metrics.items()
        )

        # Handle formatting case for empty outputs.
        operators_stats = (
            f"\n{operators_stats},\n{indent}   " if operators_stats else ""
        )
        parent_stats = f"\n{parent_stats},\n{indent}   " if parent_stats else ""
        extra_metrics = f"\n{extra_metrics}\n{indent}   " if extra_metrics else ""
        return (
            f"{indent}DatasetStatsSummary(\n"
            f"{indent}   dataset_uuid={self.dataset_uuid},\n"
            f"{indent}   base_name={self.base_name},\n"
            f"{indent}   number={self.number},\n"
            f"{indent}   extra_metrics={{{extra_metrics}}},\n"
            f"{indent}   operators_stats=[{operators_stats}],\n"
            f"{indent}   iter_stats={self.iter_stats.__repr__(level+1)},\n"
            f"{indent}   global_bytes_spilled={self.global_bytes_spilled / 1e6}MB,\n"
            f"{indent}   global_bytes_restored={self.global_bytes_restored / 1e6}MB,\n"
            f"{indent}   dataset_bytes_spilled={self.dataset_bytes_spilled / 1e6}MB,\n"
            f"{indent}   parents=[{parent_stats}],\n"
            f"{indent})"
        )

    def get_total_wall_time(self) -> float:
        """Calculate the total wall time for the dataset, this is done by finding
        the earliest start time and latest end time for any block in any operator.
        The wall time is the difference of these two times.
        """
        start_ends = [
            DatasetStatsSummary._find_start_and_end(summ)
            for summ in DatasetStatsSummary._collect_dataset_stats_summaries(self)
            if len(summ.operators_stats) > 0
        ]
        if len(start_ends) == 0:
            return 0
        else:
            earliest_start = min(start_end[0] for start_end in start_ends)
            latest_end = max(start_end[1] for start_end in start_ends)
            return latest_end - earliest_start

    def get_total_time_all_blocks(self) -> float:
        """Calculate the sum of the wall times across all blocks of all operators."""
        summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
        return sum(
            (
                sum(
                    ops.wall_time.get("sum", 0) if ops.wall_time else 0
                    for ops in summ.operators_stats
                )
            )
            for summ in summaries
        )

    def get_total_cpu_time(self) -> float:
        """Sum of CPU seconds across this summary and all ancestors."""
        parent_sum = sum(p.get_total_cpu_time() for p in self.parents)
        return parent_sum + sum(
            ss.cpu_time.get("sum", 0) for ss in self.operators_stats
        )

    def get_max_heap_memory(self) -> float:
        """Max per-block peak RSS (MiB) across this summary and all ancestors."""
        parent_memory = [p.get_max_heap_memory() for p in self.parents]
        parent_max = max(parent_memory) if parent_memory else 0
        if not self.operators_stats:
            return parent_max

        return max(
            parent_max,
            *[ss.memory.get("max", 0) for ss in self.operators_stats],
        )


@dataclass
class OperatorStatsSummary:
    operator_name: str
    # Whether the operator associated with this OperatorStatsSummary object
    # is a suboperator
    is_sub_operator: bool
    # This is the total walltime of the entire operator, typically obtained from
    # `DatasetStats.time_total_s`. An important distinction is that this is the
    # overall runtime of the operator, pulled from the stats actor, whereas the
    # computed walltimes in `self.wall_time` are calculated on a operator level.
    time_total_s: float
    # Earliest block start / latest block end (seconds) across the operator.
    earliest_start_time: float
    latest_end_time: float
    # String summarizing high-level statistics from executing the operator
    block_execution_summary_str: str
    # The fields below are dicts with stats aggregated across blocks
    # processed in this operator. For example:
    # {"min": ..., "max": ..., "mean": ..., "sum": ...}
    wall_time: Optional[Dict[str, float]] = None
    cpu_time: Optional[Dict[str, float]] = None
    udf_time: Optional[Dict[str, float]] = None
    # memory: no "sum" stat
    memory: Optional[Dict[str, float]] = None
    output_num_rows: Optional[Dict[str, float]] = None
    output_size_bytes: Optional[Dict[str, float]] = None
    # node_count: "count" stat instead of "sum"
    node_count: Optional[Dict[str, float]] = None
    task_rows: Optional[Dict[str, float]] = None

    @classmethod
    def from_block_metadata(
        cls,
        operator_name: str,
        block_metas: List[BlockMetadata],
        is_sub_operator: bool,
    ) -> "OperatorStatsSummary":
        """Calculate the stats for a operator from a given list of blocks,
        and generates a `OperatorStatsSummary` object with the results.

        Args:
            block_metas: List of `BlockMetadata` to calculate stats of
            operator_name: Name of operator associated with `blocks`
            is_sub_operator: Whether this set of blocks belongs to a sub operator.
        Returns:
            A `OperatorStatsSummary` object initialized with the calculated statistics
        """
        # Blocks without exec_stats carry no timing info and are skipped below.
        exec_stats = [m.exec_stats for m in block_metas if m.exec_stats is not None]
        rounded_total = 0
        time_total_s = 0
        earliest_start_time, latest_end_time = 0, 0

        if exec_stats:
            # Calculate the total execution time of operator as
            # the difference between the latest end time and
            # the earliest start time of all blocks in the operator.
            earliest_start_time = min(s.start_time_s for s in exec_stats)
            latest_end_time = max(s.end_time_s for s in exec_stats)
            time_total_s = latest_end_time - earliest_start_time

        if is_sub_operator:
            exec_summary_str = "{} blocks produced\n".format(len(exec_stats))
        else:
            if exec_stats:
                rounded_total = round(time_total_s, 2)
                if rounded_total <= 0:
                    # Handle -0.0 case.
                    rounded_total = 0
                exec_summary_str = "{} blocks produced in {}s".format(
                    len(exec_stats), rounded_total
                )
            else:
                exec_summary_str = ""
            exec_summary_str += "\n"

        # Aggregate rows produced per task (a task may emit multiple blocks).
        task_rows = collections.defaultdict(int)
        for meta in block_metas:
            if meta.num_rows is not None and meta.exec_stats is not None:
                task_rows[meta.exec_stats.task_idx] += meta.num_rows
        task_rows_stats = None
        if len(task_rows) > 0:
            task_rows_stats = {
                "min": min(task_rows.values()),
                "max": max(task_rows.values()),
                "mean": int(np.mean(list(task_rows.values()))),
                "count": len(task_rows),
            }
            # Prefix the task count onto the block-level summary.
            exec_summary_str = "{} tasks executed, {}".format(
                len(task_rows), exec_summary_str
            )

        wall_time_stats, cpu_stats, memory_stats, udf_stats = None, None, None, None
        if exec_stats:
            wall_time_stats = {
                "min": min([e.wall_time_s for e in exec_stats]),
                "max": max([e.wall_time_s for e in exec_stats]),
                "mean": np.mean([e.wall_time_s for e in exec_stats]),
                "sum": sum([e.wall_time_s for e in exec_stats]),
            }
            cpu_stats = {
                "min": min([e.cpu_time_s for e in exec_stats]),
                "max": max([e.cpu_time_s for e in exec_stats]),
                "mean": np.mean([e.cpu_time_s for e in exec_stats]),
                "sum": sum([e.cpu_time_s for e in exec_stats]),
            }

            # Peak RSS per block, converted to MiB.
            memory_stats_mb = [
                round(e.max_rss_bytes / (1024 * 1024), 2) for e in exec_stats
            ]
            memory_stats = {
                "min": min(memory_stats_mb),
                "max": max(memory_stats_mb),
                "mean": int(np.mean(memory_stats_mb)),
            }

            udf_stats = {
                "min": min([e.udf_time_s for e in exec_stats]),
                "max": max([e.udf_time_s for e in exec_stats]),
                "mean": np.mean([e.udf_time_s for e in exec_stats]),
                "sum": sum([e.udf_time_s for e in exec_stats]),
            }

        output_num_rows_stats = None
        output_num_rows = [m.num_rows for m in block_metas if m.num_rows is not None]
        if output_num_rows:
            output_num_rows_stats = {
                "min": min(output_num_rows),
                "max": max(output_num_rows),
                "mean": int(np.mean(output_num_rows)),
                "sum": sum(output_num_rows),
            }

        output_size_bytes_stats = None
        output_size_bytes = [
            m.size_bytes for m in block_metas if m.size_bytes is not None
        ]
        if output_size_bytes:
            output_size_bytes_stats = {
                "min": min(output_size_bytes),
                "max": max(output_size_bytes),
                "mean": int(np.mean(output_size_bytes)),
                "sum": sum(output_size_bytes),
            }

        node_counts_stats = None
        if exec_stats:
            # Count distinct tasks per node to show scheduling spread.
            node_tasks = collections.defaultdict(set)
            for s in exec_stats:
                node_tasks[s.node_id].add(s.task_idx)

            node_counts = {node: len(tasks) for node, tasks in node_tasks.items()}
            node_counts_stats = {
                "min": min(node_counts.values()),
                "max": max(node_counts.values()),
                "mean": int(np.mean(list(node_counts.values()))),
                "count": len(node_counts),
            }

        return OperatorStatsSummary(
            operator_name=operator_name,
            is_sub_operator=is_sub_operator,
            time_total_s=time_total_s,
            earliest_start_time=earliest_start_time,
            latest_end_time=latest_end_time,
            block_execution_summary_str=exec_summary_str,
            wall_time=wall_time_stats,
            cpu_time=cpu_stats,
            udf_time=udf_stats,
            memory=memory_stats,
            output_num_rows=output_num_rows_stats,
            output_size_bytes=output_size_bytes_stats,
            node_count=node_counts_stats,
            task_rows=task_rows_stats,
        )

    def __str__(self) -> str:
        """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from
        `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string
        that summarizes operator execution statistics.

        Returns:
            String with summary statistics for executing the given operator.
+ """ + indent = "\t" if self.is_sub_operator else "" + out = self.block_execution_summary_str + + wall_time_stats = self.wall_time + if wall_time_stats: + out += indent + out += "* Remote wall time: {} min, {} max, {} mean, {} total\n".format( + fmt(wall_time_stats["min"]), + fmt(wall_time_stats["max"]), + fmt(wall_time_stats["mean"]), + fmt(wall_time_stats["sum"]), + ) + + cpu_stats = self.cpu_time + if cpu_stats: + out += indent + out += "* Remote cpu time: {} min, {} max, {} mean, {} total\n".format( + fmt(cpu_stats["min"]), + fmt(cpu_stats["max"]), + fmt(cpu_stats["mean"]), + fmt(cpu_stats["sum"]), + ) + + udf_stats = self.udf_time + if udf_stats: + out += indent + out += "* UDF time: {} min, {} max, {} mean, {} total\n".format( + fmt(udf_stats["min"]), + fmt(udf_stats["max"]), + fmt(udf_stats["mean"]), + fmt(udf_stats["sum"]), + ) + + memory_stats = self.memory + if memory_stats: + out += indent + out += "* Peak heap memory usage (MiB): {} min, {} max, {} mean\n".format( + memory_stats["min"], + memory_stats["max"], + memory_stats["mean"], + ) + + output_num_rows_stats = self.output_num_rows + if output_num_rows_stats: + out += indent + out += ( + "* Output num rows per block: {} min, {} max, {} mean, {} total\n" + ).format( + output_num_rows_stats["min"], + output_num_rows_stats["max"], + output_num_rows_stats["mean"], + output_num_rows_stats["sum"], + ) + + output_size_bytes_stats = self.output_size_bytes + if output_size_bytes_stats: + out += indent + out += ( + "* Output size bytes per block: {} min, {} max, {} mean, {} total\n" + ).format( + output_size_bytes_stats["min"], + output_size_bytes_stats["max"], + output_size_bytes_stats["mean"], + output_size_bytes_stats["sum"], + ) + + task_rows = self.task_rows + if task_rows: + out += indent + out += ( + "* Output rows per task: {} min, {} max, {} mean, {} tasks used\n" + ).format( + task_rows["min"], + task_rows["max"], + task_rows["mean"], + task_rows["count"], + ) + + node_count_stats = self.node_count 
+ if node_count_stats: + out += indent + out += "* Tasks per node: {} min, {} max, {} mean; {} nodes used\n".format( + node_count_stats["min"], + node_count_stats["max"], + node_count_stats["mean"], + node_count_stats["count"], + ) + if output_num_rows_stats and self.time_total_s and wall_time_stats: + # For throughput, we compute both an observed Ray Data operator throughput + # and an estimated single node operator throughput. + + # The observed Ray Data operator throughput is computed by dividing the + # total number of rows produced by the wall time of the operator, + # time_total_s. + + # The estimated single node operator throughput is computed by dividing the + # total number of rows produced by the the sum of the wall times across all + # blocks of the operator. This assumes that on a single node the work done + # would be equivalent, with no concurrency. + total_num_out_rows = output_num_rows_stats["sum"] + out += indent + out += "* Operator throughput:\n" + out += ( + indent + "\t* Ray Data throughput:" + f" {total_num_out_rows / self.time_total_s} " + "rows/s\n" + ) + out += ( + indent + "\t* Estimated single node throughput:" + f" {total_num_out_rows / wall_time_stats['sum']} " + "rows/s\n" + ) + return out + + def __repr__(self, level=0) -> str: + """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from + `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string + that summarizes operator execution statistics. + + Returns: + String with summary statistics for executing the given operator. 
+ """ + indent = leveled_indent(level) + indent += leveled_indent(1) if self.is_sub_operator else "" + + wall_time_stats = {k: fmt(v) for k, v in (self.wall_time or {}).items()} + cpu_stats = {k: fmt(v) for k, v in (self.cpu_time or {}).items()} + memory_stats = {k: fmt(v) for k, v in (self.memory or {}).items()} + output_num_rows_stats = { + k: fmt(v) for k, v in (self.output_num_rows or {}).items() + } + output_size_bytes_stats = { + k: fmt(v) for k, v in (self.output_size_bytes or {}).items() + } + node_conut_stats = {k: fmt(v) for k, v in (self.node_count or {}).items()} + out = ( + f"{indent}OperatorStatsSummary(\n" + f"{indent} operator_name='{self.operator_name}',\n" + f"{indent} is_suboperator={self.is_sub_operator},\n" + f"{indent} time_total_s={fmt(self.time_total_s)},\n" + # block_execution_summary_str already ends with \n + f"{indent} block_execution_summary_str={self.block_execution_summary_str}" + f"{indent} wall_time={wall_time_stats or None},\n" + f"{indent} cpu_time={cpu_stats or None},\n" + f"{indent} memory={memory_stats or None},\n" + f"{indent} output_num_rows={output_num_rows_stats or None},\n" + f"{indent} output_size_bytes={output_size_bytes_stats or None},\n" + f"{indent} node_count={node_conut_stats or None},\n" + f"{indent})" + ) + return out + + +@dataclass +class IterStatsSummary: + # Time spent in actor based prefetching, in seconds. 
    wait_time: Timer
    # Time spent in `ray.get()`, in seconds
    get_time: Timer
    # Time spent in batch building, in seconds
    next_time: Timer
    # Time spent in `_format_batch_()`, in seconds
    format_time: Timer
    # Time spent in collate fn, in seconds
    collate_time: Timer
    # Time spent in finalize_fn, in seconds
    finalize_batch_time: Timer
    # Total time user thread is blocked by iter_batches
    block_time: Timer
    # Time spent in user code, in seconds
    user_time: Timer
    # Time spent in iterator initialization code, in seconds
    initialize_time: Timer
    # Total time taken by Dataset iterator, in seconds
    total_time: Timer
    # Time spent in streaming split coordinator
    streaming_split_coord_time: Timer
    # Num of blocks that are in local object store
    iter_blocks_local: int
    # Num of blocks that are in remote node and have to fetch locally
    iter_blocks_remote: int
    # Num of blocks with unknown locations
    iter_unknown_location: int

    def __str__(self) -> str:
        # Delegate so `str(summary)` / `print(summary)` match `to_string()`.
        return self.to_string()

    def to_string(self) -> str:
        """Build a human-readable report of dataset iteration timings.

        Each section is emitted only when the corresponding timer recorded a
        non-zero value, so an untouched summary renders as an empty string
        (apart from the optional block-location and streaming-split sections).

        Returns:
            Multi-line report string (possibly empty).
        """
        out = ""
        # Only emit the breakdown section if at least one timer accumulated time.
        if (
            self.block_time.get()
            or self.total_time.get()
            or self.get_time.get()
            or self.next_time.get()
            or self.format_time.get()
            or self.collate_time.get()
            or self.finalize_batch_time.get()
        ):
            out += "\nDataset iterator time breakdown:\n"
            if self.total_time.get():
                out += "* Total time overall: {}\n".format(fmt(self.total_time.get()))
            if self.initialize_time.get():
                out += (
                    "    * Total time in Ray Data iterator initialization code: "
                    "{}\n".format(fmt(self.initialize_time.get()))
                )
            if self.block_time.get():
                out += (
                    "    * Total time user thread is blocked by Ray Data iter_batches: "
                    "{}\n".format(fmt(self.block_time.get()))
                )
            if self.user_time.get():
                out += "    * Total execution time for user thread: {}\n".format(
                    fmt(self.user_time.get())
                )
            out += (
                "* Batch iteration time breakdown (summed across prefetch threads):\n"
            )
            if self.get_time.get():
                out += "    * In ray.get(): {} min, {} max, {} avg, {} total\n".format(
                    fmt(self.get_time.min()),
                    fmt(self.get_time.max()),
                    fmt(self.get_time.avg()),
                    fmt(self.get_time.get()),
                )
            if self.next_time.get():
                batch_creation_str = (
                    "    * In batch creation: {} min, {} max, " "{} avg, {} total\n"
                )
                out += batch_creation_str.format(
                    fmt(self.next_time.min()),
                    fmt(self.next_time.max()),
                    fmt(self.next_time.avg()),
                    fmt(self.next_time.get()),
                )
            if self.format_time.get():
                format_str = (
                    "    * In batch formatting: {} min, {} max, " "{} avg, {} total\n"
                )
                out += format_str.format(
                    fmt(self.format_time.min()),
                    fmt(self.format_time.max()),
                    fmt(self.format_time.avg()),
                    fmt(self.format_time.get()),
                )
            if self.collate_time.get():
                out += "    * In collate_fn: {} min, {} max, {} avg, {} total\n".format(
                    fmt(self.collate_time.min()),
                    fmt(self.collate_time.max()),
                    fmt(self.collate_time.avg()),
                    fmt(self.collate_time.get()),
                )
            if self.finalize_batch_time.get():
                format_str = (
                    "    * In host->device transfer: {} min, {} max, {} avg, {} total\n"
                )
                out += format_str.format(
                    fmt(self.finalize_batch_time.min()),
                    fmt(self.finalize_batch_time.max()),
                    fmt(self.finalize_batch_time.avg()),
                    fmt(self.finalize_batch_time.get()),
                )
        # Block locations are only collected when explicitly enabled in the
        # DataContext, since querying object locations has a cost.
        if DataContext.get_current().enable_get_object_locations_for_metrics:
            out += "Block locations:\n"
            out += "    * Num blocks local: {}\n".format(self.iter_blocks_local)
            out += "    * Num blocks remote: {}\n".format(self.iter_blocks_remote)
            out += "    * Num blocks unknown location: {}\n".format(
                self.iter_unknown_location
            )
        if self.streaming_split_coord_time.get() != 0:
            out += "Streaming split coordinator overhead time: "
            out += f"{fmt(self.streaming_split_coord_time.get())}\n"

        return out
iter_blocks_local={self.iter_blocks_local or None},\n" + f"{indent} iter_blocks_remote={self.iter_blocks_remote or None},\n" + f"{indent} iter_unknown_location={self.iter_unknown_location or None},\n" + f"{indent} next_time={fmt(self.next_time.get()) or None},\n" + f"{indent} format_time={fmt(self.format_time.get()) or None},\n" + f"{indent} user_time={fmt(self.user_time.get()) or None},\n" + f"{indent} total_time={fmt(self.total_time.get()) or None},\n" + f"{indent})" + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/torch_iterable_dataset.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/torch_iterable_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2b4dd795a7c6b146665212a3369ad805dc30d1 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/torch_iterable_dataset.py @@ -0,0 +1,10 @@ +from torch.utils.data import IterableDataset + + +class TorchIterableDataset(IterableDataset): + def __init__(self, generator_func): + self.generator_func = generator_func + + def __iter__(self): + it = self.generator_func() + yield from it diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/util.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/util.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0b70cf6a6c97b1901d7897219b9e86b14a1ebe --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/util.py @@ -0,0 +1,1091 @@ +import importlib +import logging +import os +import pathlib +import random +import sys +import threading +import time +import urllib.parse +from collections import deque +from types import ModuleType +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + Iterator, + List, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +import ray +from ray._private.utils import _get_pyarrow_version +from ray.data.context import DEFAULT_READ_OP_MIN_NUM_BLOCKS, 
WARN_PREFIX, DataContext + +if TYPE_CHECKING: + import pandas + import pyarrow + + from ray.data._internal.compute import ComputeStrategy + from ray.data._internal.planner.exchange.sort_task_spec import SortKey + from ray.data.block import Block, BlockMetadata, UserDefinedFunction + from ray.data.datasource import Datasource, Reader + from ray.util.placement_group import PlacementGroup + +logger = logging.getLogger(__name__) + + +KiB = 1024 # bytes +MiB = 1024 * KiB +GiB = 1024 * MiB + + +# NOTE: Make sure that these lower and upper bounds stay in sync with version +# constraints given in python/setup.py. +# Inclusive minimum pyarrow version. +MIN_PYARROW_VERSION = "6.0.1" +RAY_DISABLE_PYARROW_VERSION_CHECK = "RAY_DISABLE_PYARROW_VERSION_CHECK" +_VERSION_VALIDATED = False +_LOCAL_SCHEME = "local" +_EXAMPLE_SCHEME = "example" + + +LazyModule = Union[None, bool, ModuleType] +_pyarrow_dataset: LazyModule = None + + +class _NullSentinel: + """Sentinel value that sorts greater than any other value.""" + + def __eq__(self, other): + return isinstance(other, _NullSentinel) + + def __lt__(self, other): + return False + + def __le__(self, other): + return isinstance(other, _NullSentinel) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + +NULL_SENTINEL = _NullSentinel() + + +def _lazy_import_pyarrow_dataset() -> LazyModule: + global _pyarrow_dataset + if _pyarrow_dataset is None: + try: + from pyarrow import dataset as _pyarrow_dataset + except ModuleNotFoundError: + # If module is not found, set _pyarrow to False so we won't + # keep trying to import it on every _lazy_import_pyarrow() call. 
+ _pyarrow_dataset = False + return _pyarrow_dataset + + +def _check_pyarrow_version(): + """Check that pyarrow's version is within the supported bounds.""" + global _VERSION_VALIDATED + + if not _VERSION_VALIDATED: + if os.environ.get(RAY_DISABLE_PYARROW_VERSION_CHECK, "0") == "1": + _VERSION_VALIDATED = True + return + + version = _get_pyarrow_version() + if version is not None: + from packaging.version import parse as parse_version + + if parse_version(version) < parse_version(MIN_PYARROW_VERSION): + raise ImportError( + f"Dataset requires pyarrow >= {MIN_PYARROW_VERSION}, but " + f"{version} is installed. Reinstall with " + f'`pip install -U "pyarrow"`. ' + "If you want to disable this pyarrow version check, set the " + f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1." + ) + else: + logger.warning( + "You are using the 'pyarrow' module, but the exact version is unknown " + "(possibly carried as an internal component by another module). Please " + f"make sure you are using pyarrow >= {MIN_PYARROW_VERSION} to ensure " + "compatibility with Ray Dataset. " + "If you want to disable this pyarrow version check, set the " + f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1." + ) + _VERSION_VALIDATED = True + + +def _autodetect_parallelism( + parallelism: int, + target_max_block_size: int, + ctx: DataContext, + datasource_or_legacy_reader: Optional[Union["Datasource", "Reader"]] = None, + mem_size: Optional[int] = None, + placement_group: Optional["PlacementGroup"] = None, + avail_cpus: Optional[int] = None, +) -> Tuple[int, str, Optional[int]]: + """Returns parallelism to use and the min safe parallelism to avoid OOMs. + + This detects parallelism using the following heuristics, applied in order: + + 1) We start with the default value of 200. This can be overridden by + setting the `read_op_min_num_blocks` attribute of + :class:`~ray.data.context.DataContext`. + 2) Min block size. 
def _autodetect_parallelism(
    parallelism: int,
    target_max_block_size: int,
    ctx: DataContext,
    datasource_or_legacy_reader: Optional[Union["Datasource", "Reader"]] = None,
    mem_size: Optional[int] = None,
    placement_group: Optional["PlacementGroup"] = None,
    avail_cpus: Optional[int] = None,
) -> Tuple[int, str, Optional[int]]:
    """Returns parallelism to use and the min safe parallelism to avoid OOMs.

    This detects parallelism using the following heuristics, applied in order:

     1) We start with the default value of 200. This can be overridden by
        setting the `read_op_min_num_blocks` attribute of
        :class:`~ray.data.context.DataContext`.
     2) Min block size. If the parallelism would make blocks smaller than this
        threshold, the parallelism is reduced to avoid the overhead of tiny blocks.
     3) Max block size. If the parallelism would make blocks larger than this
        threshold, the parallelism is increased to avoid OOMs during processing.
     4) Available CPUs. If the parallelism cannot make use of all the available
        CPUs in the cluster, the parallelism is increased until it can.

    Args:
        parallelism: The user-requested parallelism, or -1 for auto-detection.
        target_max_block_size: The target max block size to
            produce. We pass this separately from the
            DatasetContext because it may be set per-op instead of
            per-Dataset.
        ctx: The current Dataset context to use for configs.
        datasource_or_legacy_reader: The datasource or legacy reader, to be used for
            data size estimation.
        mem_size: If passed, then used to compute the parallelism according to
            target_max_block_size.
        placement_group: The placement group that this Dataset
            will execute inside, if any.
        avail_cpus: Override avail cpus detection (for testing only).

    Returns:
        Tuple of detected parallelism (only if -1 was specified), the reason
        for the detected parallelism (only if -1 was specified), and the estimated
        inmemory size of the dataset.
    """
    # Bounds derived from the estimated in-memory data size: enough blocks to
    # keep each under target_max_block_size, but not so many that blocks fall
    # below target_min_block_size.
    min_safe_parallelism = 1
    max_reasonable_parallelism = sys.maxsize
    if mem_size is None and datasource_or_legacy_reader:
        mem_size = datasource_or_legacy_reader.estimate_inmemory_data_size()
    if mem_size is not None and not np.isnan(mem_size):
        min_safe_parallelism = max(1, int(mem_size / target_max_block_size))
        max_reasonable_parallelism = max(1, int(mem_size / ctx.target_min_block_size))

    reason = ""
    if parallelism < 0:
        if parallelism != -1:
            raise ValueError("`parallelism` must either be -1 or a positive integer.")

        # Legacy config migration: honor a user-set `min_parallelism` when the
        # new `read_op_min_num_blocks` is still at its default.
        if (
            ctx.min_parallelism is not None
            and ctx.min_parallelism != DEFAULT_READ_OP_MIN_NUM_BLOCKS
            and ctx.read_op_min_num_blocks == DEFAULT_READ_OP_MIN_NUM_BLOCKS
        ):
            logger.warning(
                "``DataContext.min_parallelism`` is deprecated in Ray 2.10. "
                "Please specify ``DataContext.read_op_min_num_blocks`` instead."
            )
            ctx.read_op_min_num_blocks = ctx.min_parallelism

        # Start with 2x the number of cores as a baseline, with a min floor.
        if placement_group is None:
            placement_group = ray.util.get_current_placement_group()
        avail_cpus = avail_cpus or _estimate_avail_cpus(placement_group)
        parallelism = max(
            min(ctx.read_op_min_num_blocks, max_reasonable_parallelism),
            min_safe_parallelism,
            avail_cpus * 2,
        )

        # Record which bound "won", for reporting in the returned reason string.
        if parallelism == ctx.read_op_min_num_blocks:
            reason = (
                "DataContext.get_current().read_op_min_num_blocks="
                f"{ctx.read_op_min_num_blocks}"
            )
        elif parallelism == max_reasonable_parallelism:
            reason = (
                "output blocks of size at least "
                "DataContext.get_current().target_min_block_size="
                f"{ctx.target_min_block_size / (1024 * 1024)}MiB"
            )
        elif parallelism == min_safe_parallelism:
            reason = (
                "output blocks of size at most "
                "DataContext.get_current().target_max_block_size="
                f"{ctx.target_max_block_size / (1024 * 1024)}MiB"
            )
        else:
            reason = (
                "parallelism at least twice the available number "
                f"of CPUs ({avail_cpus})"
            )

        logger.debug(
            f"Autodetected parallelism={parallelism} based on "
            f"estimated_available_cpus={avail_cpus} and "
            f"estimated_data_size={mem_size}."
        )

    return parallelism, reason, mem_size
parallelism={parallelism} based on " + f"estimated_available_cpus={avail_cpus} and " + f"estimated_data_size={mem_size}." + ) + + return parallelism, reason, mem_size + + +def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int: + """Estimates the available CPU parallelism for this Dataset in the cluster. + + If we aren't in a placement group, this is trivially the number of CPUs in the + cluster. Otherwise, we try to calculate how large the placement group is relative + to the size of the cluster. + + Args: + cur_pg: The current placement group, if any. + """ + cluster_cpus = int(ray.cluster_resources().get("CPU", 1)) + cluster_gpus = int(ray.cluster_resources().get("GPU", 0)) + + # If we're in a placement group, we shouldn't assume the entire cluster's + # resources are available for us to use. Estimate an upper bound on what's + # reasonable to assume is available for datasets to use. + if cur_pg: + pg_cpus = 0 + for bundle in cur_pg.bundle_specs: + # Calculate the proportion of the cluster this placement group "takes up". + # Then scale our cluster_cpus proportionally to avoid over-parallelizing + # if there are many parallel Tune trials using the cluster. + cpu_fraction = bundle.get("CPU", 0) / max(1, cluster_cpus) + gpu_fraction = bundle.get("GPU", 0) / max(1, cluster_gpus) + max_fraction = max(cpu_fraction, gpu_fraction) + # Over-parallelize by up to a factor of 2, but no more than that. It's + # preferrable to over-estimate than under-estimate. + pg_cpus += 2 * int(max_fraction * cluster_cpus) + + return min(cluster_cpus, pg_cpus) + + return cluster_cpus + + +def _estimate_available_parallelism() -> int: + """Estimates the available CPU parallelism for this Dataset in the cluster. 
+ If we are currently in a placement group, take that into account.""" + cur_pg = ray.util.get_current_placement_group() + return _estimate_avail_cpus(cur_pg) + + +def _warn_on_high_parallelism(requested_parallelism, num_read_tasks): + available_cpu_slots = ray.available_resources().get("CPU", 1) + if ( + requested_parallelism + and num_read_tasks > available_cpu_slots * 4 + and num_read_tasks >= 5000 + ): + logger.warning( + f"{WARN_PREFIX} The requested parallelism of {requested_parallelism} " + "is more than 4x the number of available CPU slots in the cluster of " + f"{available_cpu_slots}. This can " + "lead to slowdowns during the data reading phase due to excessive " + "task creation. Reduce the parallelism to match with the available " + "CPU slots in the cluster, or set parallelism to -1 for Ray Data " + "to automatically determine the parallelism. " + "You can ignore this message if the cluster is expected to autoscale." + ) + + +def _check_import(obj, *, module: str, package: str) -> None: + """Check if a required dependency is installed. + + If `module` can't be imported, this function raises an `ImportError` instructing + the user to install `package` from PyPI. + + Args: + obj: The object that has a dependency. + module: The name of the module to import. + package: The name of the package on PyPI. + """ + try: + importlib.import_module(module) + except ImportError: + raise ImportError( + f"`{obj.__class__.__name__}` depends on '{package}', but '{package}' " + f"couldn't be imported. You can install '{package}' by running `pip " + f"install {package}`." + ) + + +def _resolve_custom_scheme(path: str) -> str: + """Returns the resolved path if the given path follows a Ray-specific custom + scheme. Othewise, returns the path unchanged. + + The supported custom schemes are: "local", "example". 
+ """ + parsed_uri = urllib.parse.urlparse(path) + if parsed_uri.scheme == _LOCAL_SCHEME: + path = parsed_uri.netloc + parsed_uri.path + elif parsed_uri.scheme == _EXAMPLE_SCHEME: + example_data_path = pathlib.Path(__file__).parent.parent / "examples" / "data" + path = example_data_path / (parsed_uri.netloc + parsed_uri.path) + path = str(path.resolve()) + return path + + +def _is_local_scheme(paths: Union[str, List[str]]) -> bool: + """Returns True if the given paths are in local scheme. + Note: The paths must be in same scheme, i.e. it's invalid and + will raise error if paths are mixed with different schemes. + """ + if isinstance(paths, str): + paths = [paths] + if isinstance(paths, pathlib.Path): + paths = [str(paths)] + elif not isinstance(paths, list) or any(not isinstance(p, str) for p in paths): + raise ValueError("paths must be a path string or a list of path strings.") + elif len(paths) == 0: + raise ValueError("Must provide at least one path.") + num = sum(urllib.parse.urlparse(path).scheme == _LOCAL_SCHEME for path in paths) + if num > 0 and num < len(paths): + raise ValueError( + "The paths must all be local-scheme or not local-scheme, " + f"but found mixed {paths}" + ) + return num == len(paths) + + +def _truncated_repr(obj: Any) -> str: + """Utility to return a truncated object representation for error messages.""" + msg = str(obj) + if len(msg) > 200: + msg = msg[:200] + "..." + return msg + + +def _insert_doc_at_pattern( + obj, + *, + message: str, + pattern: str, + insert_after: bool = True, + directive: Optional[str] = None, + skip_matches: int = 0, +) -> str: + if "\n" in message: + raise ValueError( + "message shouldn't contain any newlines, since this function will insert " + f"its own linebreaks when text wrapping: {message}" + ) + + doc = obj.__doc__.strip() + if not doc: + doc = "" + + if pattern == "" and insert_after: + # Empty pattern + insert_after means that we want to append the message to the + # end of the docstring. 
def _insert_doc_at_pattern(
    obj,
    *,
    message: str,
    pattern: str,
    insert_after: bool = True,
    directive: Optional[str] = None,
    skip_matches: int = 0,
) -> None:
    """Splice ``message`` into ``obj.__doc__`` adjacent to a matched ``pattern``.

    Mutates ``obj.__doc__`` in place (returns None; the ``-> str`` annotation
    was incorrect). The message is indented to match the surrounding docstring
    and may be wrapped in an RST ``directive`` block.

    Args:
        obj: Object whose ``__doc__`` is rewritten.
        message: Single-line text to insert (no embedded newlines allowed).
        pattern: Substring to anchor on; "" with ``insert_after=True`` appends.
        insert_after: Insert after the pattern (True) or before its line (False).
        directive: Optional RST directive name (e.g. "note") to wrap the message.
        skip_matches: Number of pattern occurrences to skip before inserting.

    Raises:
        ValueError: If ``message`` contains a newline, or the pattern is not
            found after the requested number of skips.
    """
    if "\n" in message:
        raise ValueError(
            "message shouldn't contain any newlines, since this function will insert "
            f"its own linebreaks when text wrapping: {message}"
        )

    doc = obj.__doc__.strip()
    if not doc:
        doc = ""

    if pattern == "" and insert_after:
        # Empty pattern + insert_after means that we want to append the message to the
        # end of the docstring.
        head = doc
        tail = ""
    else:
        tail = doc
        i = tail.find(pattern)
        skip_matches_left = skip_matches
        # Walk successive matches until the requested number have been skipped.
        while i != -1:
            if insert_after:
                # Set offset to the first character after the pattern.
                offset = i + len(pattern)
            else:
                # Set offset to the first character in the matched line.
                offset = tail[:i].rfind("\n") + 1
            head = tail[:offset]
            tail = tail[offset:]
            skip_matches_left -= 1
            if skip_matches_left <= 0:
                break
            elif not insert_after:
                # Move past the found pattern, since we're skipping it.
                tail = tail[i - offset + len(pattern) :]
            i = tail.find(pattern)
        else:
            # while/else: the loop exhausted all matches without breaking.
            raise ValueError(
                f"Pattern {pattern} not found after {skip_matches} skips in docstring "
                f"{doc}"
            )
    # Get indentation of the to-be-inserted text.
    after_lines = list(filter(bool, tail.splitlines()))
    if len(after_lines) > 0:
        lines = after_lines
    else:
        lines = list(filter(bool, reversed(head.splitlines())))
    # Should always have at least one non-empty line in the docstring.
    assert len(lines) > 0
    indent = " " * (len(lines[0]) - len(lines[0].lstrip()))
    # Handle directive.
    message = message.strip("\n")
    if directive is not None:
        # Wrap the message in `.. <directive>::` with a 4-space body indent.
        base = f"{indent}.. {directive}::\n"
        message = message.replace("\n", "\n" + indent + " " * 4)
        message = base + indent + " " * 4 + message
    else:
        message = indent + message.replace("\n", "\n" + indent)
    # Add two blank lines before/after message, if necessary.
    if insert_after ^ (pattern == "\n\n"):
        # Only two blank lines before message if:
        # 1. Inserting message after pattern and pattern is not two blank lines.
        # 2. Inserting message before pattern and pattern is two blank lines.
        message = "\n\n" + message
    if (not insert_after) ^ (pattern == "\n\n"):
        # Only two blank lines after message if:
        # 1. Inserting message before pattern and pattern is not two blank lines.
        # 2. Inserting message after pattern and pattern is two blank lines.
        message = message + "\n\n"

    # Insert message before/after pattern.
    parts = [head, message, tail]
    # Build new docstring.
    obj.__doc__ = "".join(parts)
+ ), + pattern="Examples:", + insert_after=False, + directive="note", + ) + return obj + + return wrap + + +def AllToAllAPI(*args, **kwargs): + """Annotate the function with an indication that it's a all to all API, and that it + is an operation that requires all inputs to be materialized in-memory to execute. + """ + # This should only be used as a decorator for dataset methods. + assert len(args) == 1 and len(kwargs) == 0 and callable(args[0]) + return _all_to_all_api()(args[0]) + + +def _split_list(arr: List[Any], num_splits: int) -> List[List[Any]]: + """Split the list into `num_splits` lists. + + The splits will be even if the `num_splits` divides the length of list, otherwise + the remainder (suppose it's R) will be allocated to the first R splits (one for + each). + This is the same as numpy.array_split(). The reason we make this a separate + implementation is to allow the heterogeneity in the elements in the list. + """ + assert num_splits > 0 + q, r = divmod(len(arr), num_splits) + splits = [ + arr[i * q + min(i, r) : (i + 1) * q + min(i + 1, r)] for i in range(num_splits) + ] + return splits + + +def get_compute_strategy( + fn: "UserDefinedFunction", + fn_constructor_args: Optional[Iterable[Any]] = None, + compute: Optional[Union[str, "ComputeStrategy"]] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, +) -> "ComputeStrategy": + """Get `ComputeStrategy` based on the function or class, and concurrency + information. + + Args: + fn: The function or generator to apply to a record batch, or a class type + that can be instantiated to create such a callable. + fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. + compute: Either "tasks" (default) to use Ray Tasks or an + :class:`~ray.data.ActorPoolStrategy` to use an autoscaling actor pool. + concurrency: The number of Ray workers to use concurrently. + + Returns: + The `ComputeStrategy` for execution. + """ + # Lazily import these objects to avoid circular imports. 
+ from ray.data._internal.compute import ActorPoolStrategy, TaskPoolStrategy + from ray.data.block import CallableClass + + if isinstance(fn, CallableClass): + is_callable_class = True + else: + # TODO(chengsu): disallow object that is not a function. For example, + # An object instance of class often indicates a bug in user code. + is_callable_class = False + if fn_constructor_args is not None: + raise ValueError( + "``fn_constructor_args`` can only be specified if providing a " + f"callable class instance for ``fn``, but got: {fn}." + ) + + if compute is not None: + # Legacy code path to support `compute` argument. + logger.warning( + "The argument ``compute`` is deprecated in Ray 2.9. Please specify " + "argument ``concurrency`` instead. For more information, see " + "https://docs.ray.io/en/master/data/transforming-data.html#" + "stateful-transforms." + ) + if is_callable_class and ( + compute == "tasks" or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must specify an actor compute strategy when using a " + f"callable class, but got: {compute}. For example, use " + "``compute=ray.data.ActorPoolStrategy(size=n)``." + ) + elif not is_callable_class and ( + compute == "actors" or isinstance(compute, ActorPoolStrategy) + ): + raise ValueError( + f"``compute`` is specified as the actor compute strategy: {compute}, " + f"but ``fn`` is not a callable class: {fn}. Pass a callable class or " + "use the default ``compute`` strategy." + ) + return compute + elif concurrency is not None: + if isinstance(concurrency, tuple): + if ( + len(concurrency) == 2 + and isinstance(concurrency[0], int) + and isinstance(concurrency[1], int) + ): + if is_callable_class: + return ActorPoolStrategy( + min_size=concurrency[0], max_size=concurrency[1] + ) + else: + raise ValueError( + "``concurrency`` is set as a tuple of integers, but ``fn`` " + f"is not a callable class: {fn}. Use ``concurrency=n`` to " + "control maximum number of workers to use." 
+ ) + else: + raise ValueError( + "``concurrency`` is expected to be set as a tuple of " + f"integers, but got: {concurrency}." + ) + elif isinstance(concurrency, int): + if is_callable_class: + return ActorPoolStrategy(size=concurrency) + else: + return TaskPoolStrategy(size=concurrency) + else: + raise ValueError( + "``concurrency`` is expected to be set as an integer or a " + f"tuple of integers, but got: {concurrency}." + ) + else: + if is_callable_class: + raise ValueError( + "``concurrency`` must be specified when using a callable class. " + "For example, use ``concurrency=n`` for a pool of ``n`` workers." + ) + else: + return TaskPoolStrategy() + + +def capfirst(s: str): + """Capitalize the first letter of a string + + Args: + s: String to capitalize + + Returns: + Capitalized string + """ + return s[0].upper() + s[1:] + + +def capitalize(s: str): + """Capitalize a string, removing '_' and keeping camelcase. + + Args: + s: String to capitalize + + Returns: + Capitalized string with no underscores. 
+ """ + return "".join(capfirst(x) for x in s.split("_")) + + +def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block": + from ray.data.block import BlockAccessor, BlockExecStats + + block = BlockAccessor.for_block(df).to_arrow() + stats = BlockExecStats.builder() + return ( + block, + BlockAccessor.for_block(block).get_metadata(exec_stats=stats.build()), + ) + + +def ndarray_to_block(ndarray: np.ndarray, ctx: DataContext) -> "Block": + from ray.data.block import BlockAccessor, BlockExecStats + + DataContext._set_current(ctx) + + stats = BlockExecStats.builder() + block = BlockAccessor.batch_to_block({"data": ndarray}) + metadata = BlockAccessor.for_block(block).get_metadata(exec_stats=stats.build()) + return block, metadata + + +def get_table_block_metadata( + table: Union["pyarrow.Table", "pandas.DataFrame"] +) -> "BlockMetadata": + from ray.data.block import BlockAccessor, BlockExecStats + + stats = BlockExecStats.builder() + return BlockAccessor.for_block(table).get_metadata(exec_stats=stats.build()) + + +def unify_block_metadata_schema( + metadata: List["BlockMetadata"], +) -> Optional[Union[type, "pyarrow.lib.Schema"]]: + """For the input list of BlockMetadata, return a unified schema of the + corresponding blocks. If the metadata have no valid schema, returns None. + """ + # Some blocks could be empty, in which case we cannot get their schema. + # TODO(ekl) validate schema is the same across different blocks. + from ray.data._internal.arrow_ops.transform_pyarrow import unify_schemas + + # First check if there are blocks with computed schemas, then unify + # valid schemas from all such blocks. 
# NOTE: `logging.getLogger` is idempotent, so deriving the module logger here
# is safe even if it is also defined near the top of this module.
logger = logging.getLogger(__name__)


def find_partition_index(
    table: Union["pyarrow.Table", "pandas.DataFrame"],
    desired: List[Any],
    sort_key: "SortKey",
) -> int:
    """Return the index at which ``desired`` splits the sorted ``table``.

    Performs a lexicographic binary search over the sort-key columns,
    narrowing a half-open ``[left, right)`` candidate window one column at a
    time.

    Args:
        table: A block whose rows are sorted by ``sort_key``.
        desired: One boundary value per sort-key column.
        sort_key: Supplies the column names and the sort direction.

    Returns:
        The partition index: the left boundary of the matching run for
        ascending keys, or the right boundary for descending keys.
    """
    columns = sort_key.get_columns()
    descending = sort_key.get_descending()

    left, right = 0, len(table)
    for i in range(len(desired)):
        if left == right:
            return right
        col_name = columns[i]
        col_vals = table[col_name].to_numpy()[left:right]
        desired_val = desired[i]

        # Nulls are compared through a sentinel value so that they have a
        # well-defined position in the sort order.
        if desired_val is None:
            desired_val = NULL_SENTINEL

        # Replace None/NaN values in col_vals with the same sentinel.
        null_mask = col_vals == None  # noqa: E711
        if null_mask.any():
            col_vals = col_vals.copy()  # Make a copy to avoid modifying original
            col_vals[null_mask] = NULL_SENTINEL

        prevleft = left
        if descending is True:
            # The column is sorted in descending order, so search it through a
            # reversing `sorter` and mirror the resulting offsets back.
            left = prevleft + (
                len(col_vals)
                - np.searchsorted(
                    col_vals,
                    desired_val,
                    side="right",
                    sorter=np.arange(len(col_vals) - 1, -1, -1),
                )
            )
            right = prevleft + (
                len(col_vals)
                - np.searchsorted(
                    col_vals,
                    desired_val,
                    side="left",
                    sorter=np.arange(len(col_vals) - 1, -1, -1),
                )
            )
        else:
            left = prevleft + np.searchsorted(col_vals, desired_val, side="left")
            right = prevleft + np.searchsorted(col_vals, desired_val, side="right")
    return right if descending is True else left


def find_partitions(table, boundaries, sort_key):
    """Split a sorted block into partitions delimited by ``boundaries``.

    For each boundary value, count the number of items that are less
    than it. Since the block is sorted, these counts partition the items
    such that boundaries[i] <= x < boundaries[i + 1] for each x in
    partition[i]. If `descending` is true, `boundaries` would also be
    in descending order and we only need to count the number of items
    *greater than* the boundary value instead.
    """
    bounds = [
        find_partition_index(table, boundary, sort_key) for boundary in boundaries
    ]

    partitions = []
    last_idx = 0
    for idx in bounds:
        partitions.append(table[last_idx:idx])
        last_idx = idx
    partitions.append(table[last_idx:])
    return partitions


def get_attribute_from_class_name(class_name: str) -> Any:
    """Get Python attribute from the provided class name.

    The caller needs to make sure the provided class name includes
    full module name, and can be imported successfully.
    """
    from importlib import import_module

    paths = class_name.split(".")
    if len(paths) < 2:
        raise ValueError(f"Cannot create object from {class_name}.")

    module_name = ".".join(paths[:-1])
    attribute_name = paths[-1]
    return getattr(import_module(module_name), attribute_name)


class Queue:
    """A thread-safe queue implementation for multiple producers and consumers.

    Provide `release()` to exit producer threads cooperatively for resource release.
    """

    def __init__(self, queue_size: int):
        # The queue shared across multiple producer threads.
        self._queue = deque()
        # The boolean variable to indicate whether producer threads should exit.
        self._threads_exit = False
        # The semaphore for producer threads to put item into queue.
        self._producer_semaphore = threading.Semaphore(queue_size)
        # The semaphore for consumer threads to get item from queue.
        self._consumer_semaphore = threading.Semaphore(0)
        # The mutex lock to guard access of `self._queue` and `self._threads_exit`.
        self._mutex = threading.Lock()

    def put(self, item: Any) -> bool:
        """Put an item into the queue.

        Block if necessary until a free slot is available in queue.
        This method is called by producer threads.

        Returns:
            True if the caller thread should exit immediately.
        """
        self._producer_semaphore.acquire()
        with self._mutex:
            if self._threads_exit:
                return True
            else:
                self._queue.append(item)
        self._consumer_semaphore.release()
        return False

    def get(self) -> Any:
        """Remove and return an item from the queue.

        Block if necessary until an item is available in queue.
        This method is called by consumer threads.
        """
        self._consumer_semaphore.acquire()
        with self._mutex:
            next_item = self._queue.popleft()
        self._producer_semaphore.release()
        return next_item

    def release(self, num_threads: int):
        """Release `num_threads` of producers so they would exit cooperatively."""
        with self._mutex:
            self._threads_exit = True
        for _ in range(num_threads):
            # NOTE: After Python 3.9+, Semaphore.release(n) can be used to
            # release all threads at once.
            self._producer_semaphore.release()

    def qsize(self):
        """Return the size of the queue."""
        with self._mutex:
            return len(self._queue)


T = TypeVar("T")
U = TypeVar("U")


def make_async_gen(
    base_iterator: Iterator[T],
    fn: Callable[[Iterator[T]], Iterator[U]],
    num_workers: int = 1,
) -> Iterator[U]:
    """Returns a new iterator with elements fetched from the base_iterator
    in an async fashion using a threadpool.

    Each thread in the threadpool will fetch data from the base_iterator in a
    thread-safe fashion, and apply the provided `fn` computation concurrently.

    Args:
        base_iterator: The iterator to asynchronously fetch from.
        fn: The function to run on the input iterator.
        num_workers: The number of threads to use in the threadpool. Defaults to 1.

    Returns:
        An iterator with the same elements as outputted from `fn`.
    """

    if num_workers < 1:
        raise ValueError("Size of threadpool must be at least 1.")

    # Use a lock to fetch from the base_iterator in a thread-safe fashion.
    def convert_to_threadsafe_iterator(base_iterator: Iterator[T]) -> Iterator[T]:
        class ThreadSafeIterator:
            def __init__(self, it):
                self.lock = threading.Lock()
                self.it = it

            def __next__(self):
                with self.lock:
                    return next(self.it)

            def __iter__(self):
                return self

        return ThreadSafeIterator(base_iterator)

    thread_safe_generator = convert_to_threadsafe_iterator(base_iterator)

    class Sentinel:
        def __init__(self, thread_index: int):
            self.thread_index = thread_index

    output_queue = Queue(1)

    # Because pulling from the base iterator cannot happen concurrently,
    # we must execute the expensive computation in a separate step which
    # can be parallelized via a threadpool.
    def execute_computation(thread_index: int):
        try:
            for item in fn(thread_safe_generator):
                if output_queue.put(item):
                    # Return early when it's instructed to do so.
                    return
            output_queue.put(Sentinel(thread_index))
        except Exception as e:
            output_queue.put(e)

    # Use separate threads to produce output batches.
    threads = [
        threading.Thread(target=execute_computation, args=(i,), daemon=True)
        for i in range(num_workers)
    ]

    for thread in threads:
        thread.start()

    # Use main thread to consume output batches.
    num_threads_finished = 0
    try:
        while True:
            next_item = output_queue.get()
            if isinstance(next_item, Exception):
                raise next_item
            if isinstance(next_item, Sentinel):
                num_threads_finished += 1
            else:
                yield next_item
            if num_threads_finished >= num_workers:
                break
    finally:
        # Cooperatively exit all producer threads.
        # This is to avoid these daemon threads hanging there with holding batches in
        # memory, which can cause GRAM OOM easily. This can happen when caller breaks
        # in the middle of iteration.
        num_threads_alive = num_workers - num_threads_finished
        if num_threads_alive > 0:
            output_queue.release(num_threads_alive)


def call_with_retry(
    f: Callable[[], Any],
    description: str,
    *,
    match: Optional[List[str]] = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> Any:
    """Retry a function with exponential backoff.

    Args:
        f: The function to retry.
        match: A list of strings to match in the exception message. If ``None``, any
            error is retried.
        description: An imperative description of the function being retried. For
            example, "open the file".
        max_attempts: The maximum number of attempts to retry.
        max_backoff_s: The maximum number of seconds to backoff.
    """
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    for i in range(max_attempts):
        try:
            return f()
        except Exception as e:
            is_retryable = match is None or any(
                [pattern in str(e) for pattern in match]
            )
            if is_retryable and i + 1 < max_attempts:
                # Retry with binary exponential backoff with random jitter.
                backoff = min((2 ** (i + 1)), max_backoff_s) * random.random()
                logger.debug(
                    f"Retrying {i+1} attempts to {description} after {backoff} seconds."
                )
                time.sleep(backoff)
            else:
                raise e from None


def iterate_with_retry(
    iterable_factory: Callable[[], Iterable],
    description: str,
    *,
    match: Optional[List[str]] = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> Any:
    """Iterate through an iterable with retries.

    If the iterable raises an exception, this function recreates and re-iterates
    through the iterable, while skipping the items that have already been yielded.

    Args:
        iterable_factory: A no-argument function that creates the iterable.
        match: A list of strings to match in the exception message. If ``None``, any
            error is retried.
        description: An imperative description of the function being retried. For
            example, "open the file".
        max_attempts: The maximum number of attempts to retry.
        max_backoff_s: The maximum number of seconds to backoff.
    """
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    num_items_yielded = 0
    for attempt in range(max_attempts):
        try:
            iterable = iterable_factory()
            # BUGFIX: the inner loop variable must not shadow the attempt
            # counter; previously both were named `i`, so the retry decision
            # below compared the last *item index* against `max_attempts`.
            for item_index, item in enumerate(iterable):
                if item_index < num_items_yielded:
                    # Skip items that have already been yielded.
                    continue

                num_items_yielded += 1
                yield item
            return
        except Exception as e:
            is_retryable = match is None or any(
                [pattern in str(e) for pattern in match]
            )
            if is_retryable and attempt + 1 < max_attempts:
                # Retry with binary exponential backoff with random jitter.
                backoff = min((2 ** (attempt + 1)), max_backoff_s) * random.random()
                logger.debug(
                    f"Retrying {attempt+1} attempts to {description} after "
                    f"{backoff} seconds."
                )
                time.sleep(backoff)
            else:
                raise e from None


def create_dataset_tag(dataset_name: Optional[str], *args):
    """Build a metrics tag: the dataset name (or "dataset") joined with *args*
    by underscores."""
    tag = dataset_name or "dataset"
    for arg in args:
        tag += f"_{arg}"
    return tag


def convert_bytes_to_human_readable_str(num_bytes: int) -> str:
    """Format a byte count using decimal (SI-style) units: KB, MB, or GB."""
    if num_bytes >= 1e9:
        num_bytes_str = f"{round(num_bytes / 1e9)}GB"
    elif num_bytes >= 1e6:
        num_bytes_str = f"{round(num_bytes / 1e6)}MB"
    else:
        num_bytes_str = f"{round(num_bytes / 1e3)}KB"
    return num_bytes_str
from ray import DynamicObjectRefGenerator
from ray.air.util.tensor_extensions.arrow import ArrowConversionError
from ray.data._internal.util import _check_pyarrow_version, _truncated_repr
from ray.types import ObjectRef
from ray.util import log_once
from ray.util.annotations import DeveloperAPI

import psutil

try:
    import resource
except ImportError:
    # NOTE: the `resource` module is POSIX-only (unavailable on Windows);
    # callers fall back to psutil when this is None.
    resource = None

if TYPE_CHECKING:
    import pandas
    import pyarrow

    from ray.data._internal.block_builder import BlockBuilder
    from ray.data._internal.planner.exchange.sort_task_spec import SortKey
    from ray.data.aggregate import AggregateFn


T = TypeVar("T", contravariant=True)
U = TypeVar("U", covariant=True)

KeyType = TypeVar("KeyType")
AggType = TypeVar("AggType")


# Represents a batch of records to be stored in the Ray object store.
#
# Block data can be accessed in a uniform way via ``BlockAccessors`` like
# ``ArrowBlockAccessor``.
Block = Union["pyarrow.Table", "pandas.DataFrame"]


logger = logging.getLogger(__name__)


@DeveloperAPI
class BlockType(Enum):
    """The physical representation of a block: an Arrow table or a pandas
    DataFrame."""

    ARROW = "arrow"
    PANDAS = "pandas"


# User-facing data batch type. This is the data type for data that is supplied to and
# returned from batch UDFs.
DataBatch = Union["pyarrow.Table", "pandas.DataFrame", Dict[str, np.ndarray]]

# User-facing data column type. This is the data type for data that is supplied to and
# returned from column UDFs.
DataBatchColumn = Union[
    "pyarrow.ChunkedArray", "pyarrow.Array", "pandas.Series", np.ndarray
]


# A class type that implements __call__.
CallableClass = type


class _CallableClassProtocol(Protocol[T, U]):
    # Structural type for callable-class UDFs: called with one argument,
    # returning either a single result or an iterator of results.
    def __call__(self, __arg: T) -> Union[U, Iterator[U]]:
        ...


# A user defined function passed to map, map_batches, etc.
UserDefinedFunction = Union[
    Callable[[T], U],
    Callable[[T], Iterator[U]],
    "_CallableClassProtocol",
]

# A list of block references pending computation by a single task.
# For example, this may be the output of a task reading a file.
BlockPartition = List[Tuple[ObjectRef[Block], "BlockMetadata"]]

# The metadata that describes the output of a BlockPartition. This has the
# same type as the metadata that describes each block in the partition.
BlockPartitionMetadata = List["BlockMetadata"]

# TODO(ekl/chengsu): replace this with just
# `DynamicObjectRefGenerator` once block splitting
# is on by default. When block splitting is off, the type is a plain block.
MaybeBlockPartition = Union[Block, DynamicObjectRefGenerator]

VALID_BATCH_FORMATS = ["pandas", "pyarrow", "numpy", None]
DEFAULT_BATCH_FORMAT = "numpy"


def _apply_batch_format(given_batch_format: Optional[str]) -> str:
    """Resolve "default" to DEFAULT_BATCH_FORMAT and validate the result.

    Raises:
        ValueError: If the resolved format is not in VALID_BATCH_FORMATS.
    """
    if given_batch_format == "default":
        given_batch_format = DEFAULT_BATCH_FORMAT
    if given_batch_format not in VALID_BATCH_FORMATS:
        raise ValueError(
            f"The given batch format {given_batch_format} isn't allowed (must be one of"
            f" {VALID_BATCH_FORMATS})."
        )
    return given_batch_format


def _apply_batch_size(
    given_batch_size: Optional[Union[int, Literal["default"]]]
) -> Optional[int]:
    """Resolve "default" to the global DEFAULT_BATCH_SIZE; pass other values
    (including None) through unchanged."""
    if given_batch_size == "default":
        return ray.data.context.DEFAULT_BATCH_SIZE
    else:
        return given_batch_size


@DeveloperAPI
class BlockExecStats:
    """Execution stats for this block.

    Attributes:
        wall_time_s: The wall-clock time it took to compute this block.
        cpu_time_s: The CPU time it took to compute this block.
        node_id: A unique id for the node that computed this block.
    """

    def __init__(self):
        self.start_time_s: Optional[float] = None
        self.end_time_s: Optional[float] = None
        self.wall_time_s: Optional[float] = None
        # UDF time defaults to 0 (not None) so it can be accumulated directly.
        self.udf_time_s: Optional[float] = 0
        self.cpu_time_s: Optional[float] = None
        self.node_id = ray.runtime_context.get_runtime_context().get_node_id()
        # Max memory usage. May be an overestimate since we do not
        # differentiate from previous tasks on the same worker.
        self.max_rss_bytes: int = 0
        self.task_idx: Optional[int] = None

    @staticmethod
    def builder() -> "_BlockExecStatsBuilder":
        """Start timing now; call ``build()`` on the result to capture stats."""
        return _BlockExecStatsBuilder()

    def __repr__(self):
        return repr(
            {
                "wall_time_s": self.wall_time_s,
                "cpu_time_s": self.cpu_time_s,
                "udf_time_s": self.udf_time_s,
                "node_id": self.node_id,
            }
        )


class _BlockExecStatsBuilder:
    """Helper class for building block stats.

    When this class is created, we record the start time. When build() is
    called, the time delta is saved as part of the stats.
    """

    def __init__(self):
        self.start_time = time.perf_counter()
        self.start_cpu = time.process_time()

    def build(self) -> "BlockExecStats":
        self.end_time = time.perf_counter()
        self.end_cpu = time.process_time()

        stats = BlockExecStats()
        stats.start_time_s = self.start_time
        stats.end_time_s = self.end_time
        stats.wall_time_s = self.end_time - self.start_time
        stats.cpu_time_s = self.end_cpu - self.start_cpu
        if resource is None:
            # NOTE(swang): resource package is not supported on Windows. This
            # is only the memory usage at the end of the task, not the peak
            # memory.
            process = psutil.Process(os.getpid())
            stats.max_rss_bytes = int(process.memory_info().rss)
        else:
            # NOTE(review): ru_maxrss is reported in KiB on Linux (bytes on
            # macOS); multiplying by 1e3 only approximates bytes — confirm
            # whether this is intentional.
            stats.max_rss_bytes = int(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1e3
            )
        return stats


@DeveloperAPI
@dataclass
class BlockMetadata:
    """Metadata about the block."""

    #: The number of rows contained in this block, or None.
    num_rows: Optional[int]
    #: The approximate size in bytes of this block, or None.
    size_bytes: Optional[int]
    #: The pyarrow schema or types of the block elements, or None.
    schema: Optional[Union[type, "pyarrow.lib.Schema"]]
    #: The list of file paths used to generate this block, or
    #: the empty list if indeterminate.
    input_files: Optional[List[str]]
    #: Execution stats for this block.
    exec_stats: Optional[BlockExecStats]

    def __post_init__(self):
        # Normalize a missing input-file list to [] so downstream code can
        # iterate it unconditionally.
        if self.input_files is None:
            self.input_files = []
        if self.size_bytes is not None:
            # Require size_bytes to be int, ray.util.metrics objects
            # will not take other types like numpy.int64
            assert isinstance(self.size_bytes, int)


@DeveloperAPI
class BlockAccessor:
    """Provides accessor methods for a specific block.

    Ideally, we wouldn't need a separate accessor classes for blocks. However,
    this is needed if we want to support storing ``pyarrow.Table`` directly
    as a top-level Ray object, without a wrapping class (issue #17186).
    """

    def num_rows(self) -> int:
        """Return the number of rows contained in this block."""
        raise NotImplementedError

    def iter_rows(self, public_row_format: bool) -> Iterator[T]:
        """Iterate over the rows of this block.

        Args:
            public_row_format: Whether to cast rows into the public Dict row
                format (this incurs extra copy conversions).
        """
        raise NotImplementedError

    def slice(self, start: int, end: int, copy: bool) -> Block:
        """Return a slice of this block.

        Args:
            start: The starting index of the slice.
            end: The ending index of the slice.
            copy: Whether to perform a data copy for the slice.

        Returns:
            The sliced block result.
        """
        raise NotImplementedError

    def take(self, indices: List[int]) -> Block:
        """Return a new block containing the provided row indices.

        Args:
            indices: The row indices to return.

        Returns:
            A new block containing the provided row indices.
        """
        raise NotImplementedError

    def select(self, columns: List[Optional[str]]) -> Block:
        """Return a new block containing the provided columns."""
        raise NotImplementedError

    def random_shuffle(self, random_seed: Optional[int]) -> Block:
        """Randomly shuffle this block."""
        raise NotImplementedError

    def to_pandas(self) -> "pandas.DataFrame":
        """Convert this block into a Pandas dataframe."""
        raise NotImplementedError

    def to_numpy(
        self, columns: Optional[Union[str, List[str]]] = None
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Convert this block (or columns of block) into a NumPy ndarray.

        Args:
            columns: Name of columns to convert, or None if converting all columns.
        """
        raise NotImplementedError

    def to_arrow(self) -> "pyarrow.Table":
        """Convert this block into an Arrow table."""
        raise NotImplementedError

    def to_block(self) -> Block:
        """Return the base block that this accessor wraps."""
        raise NotImplementedError

    def to_default(self) -> Block:
        """Return the default data format for this accessor."""
        return self.to_block()

    def to_batch_format(self, batch_format: Optional[str]) -> DataBatch:
        """Convert this block into the provided batch format.

        Args:
            batch_format: The batch format to convert this block to.

        Returns:
            This block formatted as the provided batch format.

        Raises:
            ValueError: If ``batch_format`` is not one of VALID_BATCH_FORMATS
                (or the legacy aliases "default"/"native").
        """
        if batch_format is None:
            return self.to_block()
        elif batch_format == "default" or batch_format == "native":
            return self.to_default()
        elif batch_format == "pandas":
            return self.to_pandas()
        elif batch_format == "pyarrow":
            return self.to_arrow()
        elif batch_format == "numpy":
            return self.to_numpy()
        else:
            raise ValueError(
                f"The batch format must be one of {VALID_BATCH_FORMATS}, got: "
                f"{batch_format}"
            )

    def size_bytes(self) -> int:
        """Return the approximate size in bytes of this block."""
        raise NotImplementedError

    def schema(self) -> Union[type, "pyarrow.lib.Schema"]:
        """Return the Python type or pyarrow schema of this block."""
        raise NotImplementedError

    def get_metadata(
        self,
        input_files: Optional[List[str]] = None,
        exec_stats: Optional[BlockExecStats] = None,
    ) -> BlockMetadata:
        """Create a metadata object from this block."""
        return BlockMetadata(
            num_rows=self.num_rows(),
            size_bytes=self.size_bytes(),
            schema=self.schema(),
            input_files=input_files,
            exec_stats=exec_stats,
        )

    def zip(self, other: "Block") -> "Block":
        """Zip this block with another block of the same type and size."""
        raise NotImplementedError

    @staticmethod
    def builder() -> "BlockBuilder":
        """Create a builder for this block type."""
        raise NotImplementedError

    @classmethod
    def batch_to_block(
        cls,
        batch: DataBatch,
        block_type: Optional[BlockType] = None,
    ) -> Block:
        """Create a block from user-facing data formats.

        Dict batches become Arrow blocks when possible, falling back to
        Pandas blocks on Arrow conversion failure (unless an explicit
        ``block_type`` forbids the fallback). Non-dict batches are returned
        unchanged.
        """

        if isinstance(batch, np.ndarray):
            raise ValueError(
                f"Error validating {_truncated_repr(batch)}: "
                "Standalone numpy arrays are not "
                "allowed in Ray 2.5. Return a dict of field -> array, "
                "e.g., `{'data': array}` instead of `array`."
            )

        elif isinstance(batch, collections.abc.Mapping):
            if block_type is None or block_type == BlockType.ARROW:
                try:
                    return cls.batch_to_arrow_block(batch)
                except ArrowConversionError as e:
                    if log_once("_fallback_to_pandas_block_warning"):
                        logger.warning(
                            f"Failed to convert batch to Arrow due to: {e}; "
                            f"falling back to Pandas block"
                        )

                    if block_type is None:
                        return cls.batch_to_pandas_block(batch)
                    else:
                        raise e
            else:
                assert block_type == BlockType.PANDAS
                return cls.batch_to_pandas_block(batch)
        return batch

    @classmethod
    def batch_to_arrow_block(cls, batch: Dict[str, Any]) -> Block:
        """Create an Arrow block from user-facing data formats."""
        from ray.data._internal.arrow_block import ArrowBlockBuilder

        return ArrowBlockBuilder._table_from_pydict(batch)

    @classmethod
    def batch_to_pandas_block(cls, batch: Dict[str, Any]) -> Block:
        """Create a Pandas block from user-facing data formats."""
        from ray.data._internal.pandas_block import PandasBlockAccessor

        return PandasBlockAccessor.numpy_to_block(batch)

    @staticmethod
    def for_block(block: Block) -> "BlockAccessor[T]":
        """Create a block accessor for the given block.

        Raises:
            ValueError: For standalone Python-object lists.
            TypeError: For any other unsupported block type.
        """
        _check_pyarrow_version()
        import pandas
        import pyarrow

        if isinstance(block, pyarrow.Table):
            from ray.data._internal.arrow_block import ArrowBlockAccessor

            return ArrowBlockAccessor(block)
        elif isinstance(block, pandas.DataFrame):
            from ray.data._internal.pandas_block import PandasBlockAccessor

            return PandasBlockAccessor(block)
        elif isinstance(block, bytes):
            from ray.data._internal.arrow_block import ArrowBlockAccessor

            return ArrowBlockAccessor.from_bytes(block)
        elif isinstance(block, list):
            raise ValueError(
                f"Error validating {_truncated_repr(block)}: "
                "Standalone Python objects are not "
                "allowed in Ray 2.5. To use Python objects in a dataset, "
                "wrap them in a dict of numpy arrays, e.g., "
                "return `{'item': batch}` instead of just `batch`."
            )
        else:
            raise TypeError("Not a block type: {} ({})".format(block, type(block)))

    def sample(self, n_samples: int, sort_key: "SortKey") -> "Block":
        """Return a random sample of items from this block."""
        raise NotImplementedError

    def sort_and_partition(
        self, boundaries: List[T], sort_key: "SortKey"
    ) -> List["Block"]:
        """Return a list of sorted partitions of this block."""
        raise NotImplementedError

    def combine(self, key: "SortKey", aggs: Tuple["AggregateFn"]) -> Block:
        """Combine rows with the same key into an accumulator."""
        raise NotImplementedError

    @staticmethod
    def merge_sorted_blocks(
        blocks: List["Block"], sort_key: "SortKey"
    ) -> Tuple[Block, BlockMetadata]:
        """Return a sorted block by merging a list of sorted blocks."""
        raise NotImplementedError

    @staticmethod
    def aggregate_combined_blocks(
        blocks: List[Block], sort_key: "SortKey", aggs: Tuple["AggregateFn"]
    ) -> Tuple[Block, BlockMetadata]:
        """Aggregate partially combined and sorted blocks."""
        raise NotImplementedError

    def block_type(self) -> BlockType:
        """Return the block type of this block."""
        raise NotImplementedError
diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/context.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..347d3da68372e6a42fff61058fae92e119555b9d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/context.py
@@ -0,0 +1,449 @@
+import logging
+import os
+import threading
+import warnings
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+import ray
+from ray._private.ray_constants import env_bool, env_integer
+from ray._private.worker import WORKER_MODE
+from ray.util.annotations import DeveloperAPI
+from ray.util.debug import log_once
+from ray.util.scheduling_strategies import SchedulingStrategyT
+
+if TYPE_CHECKING:
+    from
ray.data._internal.execution.interfaces import ExecutionOptions + +logger = logging.getLogger(__name__) + +# The context singleton on this process. +_default_context: "Optional[DataContext]" = None +_context_lock = threading.Lock() + + +# We chose 128MiB for default: With streaming execution and num_cpus many concurrent +# tasks, the memory footprint will be about 2 * num_cpus * target_max_block_size ~= RAM +# * DEFAULT_OBJECT_STORE_MEMORY_LIMIT_FRACTION * 0.3 (default object store memory +# fraction set by Ray core), assuming typical memory:core ratio of 4:1. +DEFAULT_TARGET_MAX_BLOCK_SIZE = 128 * 1024 * 1024 + +# We set a higher target block size because we have to materialize +# all input blocks anyway, so there is no performance advantage to having +# smaller blocks. Setting a larger block size allows avoiding overhead from an +# excessive number of partitions. +# We choose 1GiB as 4x less than the typical memory:core ratio (4:1). +DEFAULT_SHUFFLE_TARGET_MAX_BLOCK_SIZE = 1024 * 1024 * 1024 + +# We will attempt to slice blocks whose size exceeds this factor * +# target_max_block_size. We will warn the user if slicing fails and we produce +# blocks larger than this threshold. +MAX_SAFE_BLOCK_SIZE_FACTOR = 1.5 + +DEFAULT_TARGET_MIN_BLOCK_SIZE = 1 * 1024 * 1024 + +# This default appears to work well with most file sizes on remote storage systems, +# which is very sensitive to the buffer size. +DEFAULT_STREAMING_READ_BUFFER_SIZE = 32 * 1024 * 1024 + +DEFAULT_ENABLE_PANDAS_BLOCK = True + +DEFAULT_READ_OP_MIN_NUM_BLOCKS = 200 + +DEFAULT_ACTOR_PREFETCHER_ENABLED = False + +DEFAULT_USE_PUSH_BASED_SHUFFLE = bool( + os.environ.get("RAY_DATA_PUSH_BASED_SHUFFLE", None) +) + +DEFAULT_SCHEDULING_STRATEGY = "SPREAD" + +# This default enables locality-based scheduling in Ray for tasks where arg data +# transfer is a bottleneck. 
+DEFAULT_SCHEDULING_STRATEGY_LARGE_ARGS = "DEFAULT" + +DEFAULT_LARGE_ARGS_THRESHOLD = 50 * 1024 * 1024 + +DEFAULT_USE_POLARS = False + +DEFAULT_EAGER_FREE = bool(int(os.environ.get("RAY_DATA_EAGER_FREE", "1"))) + +DEFAULT_DECODING_SIZE_ESTIMATION_ENABLED = True + +DEFAULT_MIN_PARALLELISM = 200 + +DEFAULT_ENABLE_TENSOR_EXTENSION_CASTING = True + +# NOTE: V1 tensor type format only supports tensors of no more than 2Gb in +# total cumulative size (due to it internally utilizing int32 offsets) +# +# V2 in turn relies on int64 offsets, therefore having a limit of ~9Eb (exabytes) +DEFAULT_USE_ARROW_TENSOR_V2 = env_bool("RAY_DATA_USE_ARROW_TENSOR_V2", True) + +DEFAULT_ENABLE_FALLBACK_TO_ARROW_OBJECT_EXT_TYPE = True + +DEFAULT_AUTO_LOG_STATS = False + +DEFAULT_VERBOSE_STATS_LOG = False + +DEFAULT_TRACE_ALLOCATIONS = bool(int(os.environ.get("RAY_DATA_TRACE_ALLOCATIONS", "0"))) + +DEFAULT_LOG_INTERNAL_STACK_TRACE_TO_STDOUT = env_bool( + "RAY_DATA_LOG_INTERNAL_STACK_TRACE_TO_STDOUT", False +) + +DEFAULT_RAY_DATA_RAISE_ORIGINAL_MAP_EXCEPTION = env_bool( + "RAY_DATA_RAISE_ORIGINAL_MAP_EXCEPTION", False +) + +DEFAULT_USE_RAY_TQDM = bool(int(os.environ.get("RAY_TQDM", "1"))) + +# Globally enable or disable all progress bars. +# If this is False, both the global and operator-level progress bars are disabled. +DEFAULT_ENABLE_PROGRESS_BARS = not bool( + env_integer("RAY_DATA_DISABLE_PROGRESS_BARS", 0) +) +DEFAULT_ENABLE_PROGRESS_BAR_NAME_TRUNCATION = env_bool( + "RAY_DATA_ENABLE_PROGRESS_BAR_NAME_TRUNCATION", True +) + +DEFAULT_ENABLE_GET_OBJECT_LOCATIONS_FOR_METRICS = False + + +# `write_file_retry_on_errors` is deprecated in favor of `retried_io_errors`. You +# shouldn't need to modify `DEFAULT_WRITE_FILE_RETRY_ON_ERRORS`. 
+DEFAULT_WRITE_FILE_RETRY_ON_ERRORS = ( + "AWS Error INTERNAL_FAILURE", + "AWS Error NETWORK_CONNECTION", + "AWS Error SLOW_DOWN", + "AWS Error UNKNOWN (HTTP status 503)", +) + +DEFAULT_RETRIED_IO_ERRORS = ( + "AWS Error INTERNAL_FAILURE", + "AWS Error NETWORK_CONNECTION", + "AWS Error SLOW_DOWN", + "AWS Error UNKNOWN (HTTP status 503)", + "AWS Error SERVICE_UNAVAILABLE", +) + +DEFAULT_WARN_ON_DRIVER_MEMORY_USAGE_BYTES = 2 * 1024 * 1024 * 1024 + +DEFAULT_ACTOR_TASK_RETRY_ON_ERRORS = False + +DEFAULT_ENABLE_OP_RESOURCE_RESERVATION = env_bool( + "RAY_DATA_ENABLE_OP_RESOURCE_RESERVATION", True +) + +DEFAULT_OP_RESOURCE_RESERVATION_RATIO = float( + os.environ.get("RAY_DATA_OP_RESERVATION_RATIO", "0.5") +) + +DEFAULT_MAX_ERRORED_BLOCKS = 0 + +# Use this to prefix important warning messages for the user. +WARN_PREFIX = "⚠️ " + +# Use this to prefix important success messages for the user. +OK_PREFIX = "✔️ " + +# Default batch size for batch transformations. +DEFAULT_BATCH_SIZE = 1024 + +# Default value of the max number of blocks that can be buffered at the +# streaming generator of each `DataOpTask`. +# Note, if this value is too large, we'll need to allocate more memory +# buffer for the pending task outputs, which may lead to bad performance +# as we may not have enough memory buffer for the operator outputs. +# If the value is too small, the task may be frequently blocked due to +# streaming generator backpressure. +DEFAULT_MAX_NUM_BLOCKS_IN_STREAMING_GEN_BUFFER = 2 + +# Default value for whether or not to try to create directories for write +# calls if the URI is an S3 URI. +DEFAULT_S3_TRY_CREATE_DIR = False + +DEFAULT_WAIT_FOR_MIN_ACTORS_S = env_integer( + "RAY_DATA_DEFAULT_WAIT_FOR_MIN_ACTORS_S", 60 * 10 +) + + +def _execution_options_factory() -> "ExecutionOptions": + # Lazily import to avoid circular dependencies. 
    from ray.data._internal.execution.interfaces import ExecutionOptions

    return ExecutionOptions()


@DeveloperAPI
@dataclass
class DataContext:
    """Global settings for Ray Data.

    Configure this class to enable advanced features and tune performance.

    .. warning::
        Apply changes before creating a :class:`~ray.data.Dataset`. Changes made after
        won't take effect.

    .. note::
        This object is automatically propagated to workers. Access it from the driver
        and remote workers with :meth:`DataContext.get_current()`.

    Examples:
        >>> from ray.data import DataContext
        >>> DataContext.get_current().enable_progress_bars = False

    Args:
        target_max_block_size: The max target block size in bytes for reads and
            transformations.
        target_shuffle_max_block_size: The max target block size in bytes for shuffle
            ops like ``random_shuffle``, ``sort``, and ``repartition``.
        target_min_block_size: Ray Data avoids creating blocks smaller than this
            size in bytes on read. This takes precedence over
            ``read_op_min_num_blocks``.
        streaming_read_buffer_size: Buffer size when doing streaming reads from local or
            remote storage.
        enable_pandas_block: Whether pandas block format is enabled.
        actor_prefetcher_enabled: Whether to use actor based block prefetcher.
        use_push_based_shuffle: Whether to use push-based shuffle.
        pipeline_push_based_shuffle_reduce_tasks:
        scheduling_strategy: The global scheduling strategy. For tasks with large args,
            ``scheduling_strategy_large_args`` takes precedence.
        scheduling_strategy_large_args: Scheduling strategy for tasks with large args.
        large_args_threshold: Size in bytes after which point task arguments are
            considered large. Choose a value so that the data transfer overhead is
            significant in comparison to task scheduling (i.e., low tens of ms).
        use_polars: Whether to use Polars for tabular dataset sorts, groupbys, and
            aggregations.
        eager_free: Whether to eagerly free memory.
        decoding_size_estimation: Whether to estimate in-memory decoding data size for
            data source.
        min_parallelism: This setting is deprecated. Use ``read_op_min_num_blocks``
            instead.
        read_op_min_num_blocks: Minimum number of read output blocks for a dataset.
        enable_tensor_extension_casting: Whether to automatically cast NumPy ndarray
            columns in Pandas DataFrames to tensor extension columns.
        use_arrow_tensor_v2: Config enabling V2 version of ArrowTensorArray supporting
            tensors > 2Gb in size (off by default)
        enable_fallback_to_arrow_object_ext_type: Enables fallback to serialize column
            values not supported by Arrow natively (like user-defined custom Python
            classes for ex, etc) using `ArrowPythonObjectType` (simply serializing
            these as bytes)
        enable_auto_log_stats: Whether to automatically log stats after execution. If
            disabled, you can still manually print stats with ``Dataset.stats()``.
        verbose_stats_logs: Whether stats logs should be verbose. This includes fields
            such as `extra_metrics` in the stats output, which are excluded by default.
        trace_allocations: Whether to trace allocations / eager free. This adds
            significant performance overheads and should only be used for debugging.
        execution_options: The
            :class:`~ray.data._internal.execution.interfaces.execution_options.ExecutionOptions`
            to use.
        use_ray_tqdm: Whether to enable distributed tqdm.
        enable_progress_bars: Whether to enable progress bars.
        enable_progress_bar_name_truncation: If True, the name of the progress bar
            (often the operator name) will be truncated if it exceeds
            `ProgressBar.MAX_NAME_LENGTH`. Otherwise, the full operator name is shown.
        enable_get_object_locations_for_metrics: Whether to enable
            ``get_object_locations`` for metrics.
        write_file_retry_on_errors: A list of substrings of error messages that should
            trigger a retry when writing files. This is useful for handling transient
            errors when writing to remote storage systems.
        warn_on_driver_memory_usage_bytes: If driver memory exceeds this threshold,
            Ray Data warns you. For now, this only applies to shuffle ops because most
            other ops are unlikely to use as much driver memory.
        actor_task_retry_on_errors: The application-level errors that actor task should
            retry. This follows same format as ``retry_exceptions`` in
            Ray Core. Default to `False` to not retry on any errors. Set to `True` to
            retry all errors, or set to a list of errors to retry.
        op_resource_reservation_enabled: Whether to reserve resources for each operator.
        op_resource_reservation_ratio: The ratio of the total resources to reserve for
            each operator.
        max_errored_blocks: Max number of blocks that are allowed to have errors,
            unlimited if negative. This option allows application-level exceptions in
            block processing tasks. These exceptions may be caused by UDFs (e.g., due to
            corrupted data samples) or IO errors. Data in the failed blocks are dropped.
            This option can be useful to prevent a long-running job from failing due to
            a small number of bad blocks.
        log_internal_stack_trace_to_stdout: Whether to include internal Ray Data/Ray
            Core code stack frames when logging to stdout. The full stack trace is
            always written to the Ray Data log file.
        raise_original_map_exception: Whether to raise the original exception
            encountered in map UDF instead of wrapping it in a `UserCodeException`.
        print_on_execution_start: If ``True``, print execution information when
            execution starts.
        s3_try_create_dir: If ``True``, try to create directories on S3 when a write
            call is made with a S3 URI.
        wait_for_min_actors_s: The default time to wait for minimum requested
            actors to start before raising a timeout, in seconds.
        retried_io_errors: A list of substrings of error messages that should
            trigger a retry when reading or writing files. This is useful for handling
            transient errors when reading from remote storage systems.
    """

    target_max_block_size: int = DEFAULT_TARGET_MAX_BLOCK_SIZE
    target_shuffle_max_block_size: int = DEFAULT_SHUFFLE_TARGET_MAX_BLOCK_SIZE
    target_min_block_size: int = DEFAULT_TARGET_MIN_BLOCK_SIZE
    streaming_read_buffer_size: int = DEFAULT_STREAMING_READ_BUFFER_SIZE
    enable_pandas_block: bool = DEFAULT_ENABLE_PANDAS_BLOCK
    actor_prefetcher_enabled: bool = DEFAULT_ACTOR_PREFETCHER_ENABLED
    use_push_based_shuffle: bool = DEFAULT_USE_PUSH_BASED_SHUFFLE
    pipeline_push_based_shuffle_reduce_tasks: bool = True
    scheduling_strategy: SchedulingStrategyT = DEFAULT_SCHEDULING_STRATEGY
    scheduling_strategy_large_args: SchedulingStrategyT = (
        DEFAULT_SCHEDULING_STRATEGY_LARGE_ARGS
    )
    large_args_threshold: int = DEFAULT_LARGE_ARGS_THRESHOLD
    use_polars: bool = DEFAULT_USE_POLARS
    eager_free: bool = DEFAULT_EAGER_FREE
    decoding_size_estimation: bool = DEFAULT_DECODING_SIZE_ESTIMATION_ENABLED
    min_parallelism: int = DEFAULT_MIN_PARALLELISM
    read_op_min_num_blocks: int = DEFAULT_READ_OP_MIN_NUM_BLOCKS
    enable_tensor_extension_casting: bool = DEFAULT_ENABLE_TENSOR_EXTENSION_CASTING
    use_arrow_tensor_v2: bool = DEFAULT_USE_ARROW_TENSOR_V2
    # NOTE(review): this attribute has no type annotation, so the dataclass
    # machinery treats it as a plain class attribute rather than a field — it
    # cannot be set via the DataContext(...) constructor. This looks
    # unintentional, but adding ": bool" would change __init__'s signature;
    # confirm intent upstream before changing.
    enable_fallback_to_arrow_object_ext_type = (
        DEFAULT_ENABLE_FALLBACK_TO_ARROW_OBJECT_EXT_TYPE
    )
    enable_auto_log_stats: bool = DEFAULT_AUTO_LOG_STATS
    verbose_stats_logs: bool = DEFAULT_VERBOSE_STATS_LOG
    trace_allocations: bool = DEFAULT_TRACE_ALLOCATIONS
    # Constructed lazily via a factory (rather than a module-level default) so
    # that importing this module does not import the execution interfaces.
    execution_options: "ExecutionOptions" = field(
        default_factory=_execution_options_factory
    )
    use_ray_tqdm: bool = DEFAULT_USE_RAY_TQDM
    enable_progress_bars: bool = DEFAULT_ENABLE_PROGRESS_BARS
    # By default, enable the progress bar for operator-level progress.
    # In __post_init__(), we disable operator-level progress
    # bars when running in a Ray job.
    enable_operator_progress_bars: bool = True
    enable_progress_bar_name_truncation: bool = (
        DEFAULT_ENABLE_PROGRESS_BAR_NAME_TRUNCATION
    )
    enable_get_object_locations_for_metrics: bool = (
        DEFAULT_ENABLE_GET_OBJECT_LOCATIONS_FOR_METRICS
    )
    # Deprecated in favor of retried_io_errors; see __setattr__ below.
    # NOTE(review): assumes DEFAULT_WRITE_FILE_RETRY_ON_ERRORS is immutable
    # (e.g. a tuple) — dataclasses reject list/dict/set defaults. TODO confirm.
    write_file_retry_on_errors: List[str] = DEFAULT_WRITE_FILE_RETRY_ON_ERRORS
    warn_on_driver_memory_usage_bytes: int = DEFAULT_WARN_ON_DRIVER_MEMORY_USAGE_BYTES
    actor_task_retry_on_errors: Union[
        bool, List[BaseException]
    ] = DEFAULT_ACTOR_TASK_RETRY_ON_ERRORS
    op_resource_reservation_enabled: bool = DEFAULT_ENABLE_OP_RESOURCE_RESERVATION
    op_resource_reservation_ratio: float = DEFAULT_OP_RESOURCE_RESERVATION_RATIO
    max_errored_blocks: int = DEFAULT_MAX_ERRORED_BLOCKS
    log_internal_stack_trace_to_stdout: bool = (
        DEFAULT_LOG_INTERNAL_STACK_TRACE_TO_STDOUT
    )
    raise_original_map_exception: bool = DEFAULT_RAY_DATA_RAISE_ORIGINAL_MAP_EXCEPTION
    print_on_execution_start: bool = True
    s3_try_create_dir: bool = DEFAULT_S3_TRY_CREATE_DIR
    wait_for_min_actors_s: int = DEFAULT_WAIT_FOR_MIN_ACTORS_S
    # Copied per-instance so callers can mutate the list safely.
    retried_io_errors: List[str] = field(
        default_factory=lambda: list(DEFAULT_RETRIED_IO_ERRORS)
    )

    def __post_init__(self):
        """Initialize private state and apply environment-dependent defaults."""
        # The additional ray remote args that should be added to
        # the task-pool-based data tasks.
        self._task_pool_data_task_remote_args: Dict[str, Any] = {}
        # The extra key-value style configs.
        # These configs are managed by individual components or plugins via
        # `set_config`, `get_config` and `remove_config`.
        # The reason why we use a dict instead of individual fields is to decouple
        # the DataContext from the plugin implementations, as well as to avoid
        # circular dependencies.
        self._kv_configs: Dict[str, Any] = {}
        self._max_num_blocks_in_streaming_gen_buffer = (
            DEFAULT_MAX_NUM_BLOCKS_IN_STREAMING_GEN_BUFFER
        )

        # RAY_JOB_ID is only set inside Ray Jobs; used to detect that context.
        is_ray_job = os.environ.get("RAY_JOB_ID") is not None
        if is_ray_job:
            # Only log from the driver, and only once per process (log_once).
            is_driver = ray.get_runtime_context().worker.mode != WORKER_MODE
            if is_driver and log_once(
                "ray_data_disable_operator_progress_bars_in_ray_jobs"
            ):
                logger.info(
                    "Disabling operator-level progress bars by default in Ray Jobs. "
                    "To enable progress bars for all operators, set "
                    "`ray.data.DataContext.get_current()"
                    ".enable_operator_progress_bars = True`."
                )
            # Disable operator-level progress bars by default in Ray jobs.
            # The global progress bar for the overall Dataset execution will
            # still be enabled, unless the user also sets
            # `ray.data.DataContext.get_current().enable_progress_bars = False`.
            self.enable_operator_progress_bars = False
        else:
            # When not running in Ray job, operator-level progress
            # bars are enabled by default.
            self.enable_operator_progress_bars = True

    def __setattr__(self, name: str, value: Any) -> None:
        """Intercept attribute writes to warn about the deprecated
        ``write_file_retry_on_errors`` setting before storing the value."""
        if (
            name == "write_file_retry_on_errors"
            and value != DEFAULT_WRITE_FILE_RETRY_ON_ERRORS
        ):
            warnings.warn(
                "`write_file_retry_on_errors` is deprecated. Configure "
                "`retried_io_errors` instead.",
                DeprecationWarning,
            )

        super().__setattr__(name, value)

    @staticmethod
    def get_current() -> "DataContext":
        """Get or create a singleton context.

        If the context has not yet been created in this process, it will be
        initialized with default settings.
        """

        global _default_context

        # Lock guards lazy creation of the process-wide singleton.
        with _context_lock:
            if _default_context is None:
                _default_context = DataContext()

        return _default_context

    @staticmethod
    def _set_current(context: "DataContext") -> None:
        """Set the current context in a remote worker.

        This is used internally by Dataset to propagate the driver context to
        remote workers used for parallelization.
+ """ + global _default_context + _default_context = context + + def get_config(self, key: str, default: Any = None) -> Any: + """Get the value for a key-value style config. + + Args: + key: The key of the config. + default: The default value to return if the key is not found. + Returns: The value for the key, or the default value if the key is not found. + """ + return self._kv_configs.get(key, default) + + def set_config(self, key: str, value: Any) -> None: + """Set the value for a key-value style config. + + Args: + key: The key of the config. + value: The value of the config. + """ + self._kv_configs[key] = value + + def remove_config(self, key: str) -> None: + """Remove a key-value style config. + + Args: + key: The key of the config. + """ + self._kv_configs.pop(key, None) + + +# Backwards compatibility alias. +DatasetContext = DataContext diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/dataset.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..779a5bd3295e734139a2890650341183f60ddb87 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/dataset.py @@ -0,0 +1,5504 @@ +import collections +import copy +import html +import itertools +import logging +import time +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterable, + Iterator, + List, + Literal, + Mapping, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +import ray +import ray.cloudpickle as pickle +from ray._private.thirdparty.tabulate.tabulate import tabulate +from ray._private.usage import usage_lib +from ray.air.util.tensor_extensions.arrow import ( + ArrowTensorTypeV2, + get_arrow_extension_fixed_shape_tensor_types, +) +from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray +from ray.data._internal.aggregate import Max, Mean, Min, Std, Sum +from ray.data._internal.compute import ComputeStrategy 
+from ray.data._internal.datasource.bigquery_datasink import BigQueryDatasink +from ray.data._internal.datasource.csv_datasink import CSVDatasink +from ray.data._internal.datasource.image_datasink import ImageDatasink +from ray.data._internal.datasource.json_datasink import JSONDatasink +from ray.data._internal.datasource.mongo_datasink import MongoDatasink +from ray.data._internal.datasource.numpy_datasink import NumpyDatasink +from ray.data._internal.datasource.parquet_datasink import ParquetDatasink +from ray.data._internal.datasource.sql_datasink import SQLDatasink +from ray.data._internal.datasource.tfrecords_datasink import TFRecordDatasink +from ray.data._internal.datasource.webdataset_datasink import WebDatasetDatasink +from ray.data._internal.equalize import _equalize +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.execution.interfaces.ref_bundle import ( + _ref_bundles_iterator_to_block_refs_list, +) +from ray.data._internal.iterator.iterator_impl import DataIteratorImpl +from ray.data._internal.iterator.stream_split_iterator import StreamSplitDataIterator +from ray.data._internal.logical.operators.all_to_all_operator import ( + RandomizeBlocks, + RandomShuffle, + Repartition, + Sort, +) +from ray.data._internal.logical.operators.count_operator import Count +from ray.data._internal.logical.operators.input_data_operator import InputData +from ray.data._internal.logical.operators.map_operator import ( + Filter, + FlatMap, + MapBatches, + MapRows, + Project, +) +from ray.data._internal.logical.operators.n_ary_operator import ( + Union as UnionLogicalOperator, +) +from ray.data._internal.logical.operators.n_ary_operator import Zip +from ray.data._internal.logical.operators.one_to_one_operator import Limit +from ray.data._internal.logical.operators.write_operator import Write +from ray.data._internal.logical.optimizers import LogicalPlan +from ray.data._internal.pandas_block import PandasBlockBuilder, PandasBlockSchema 
+from ray.data._internal.plan import ExecutionPlan +from ray.data._internal.planner.exchange.sort_task_spec import SortKey +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data._internal.split import _get_num_rows, _split_at_indices +from ray.data._internal.stats import DatasetStats, DatasetStatsSummary, StatsManager +from ray.data._internal.util import AllToAllAPI, ConsumptionAPI, get_compute_strategy +from ray.data.aggregate import AggregateFn +from ray.data.block import ( + VALID_BATCH_FORMATS, + Block, + BlockAccessor, + DataBatch, + DataBatchColumn, + T, + U, + UserDefinedFunction, + _apply_batch_format, + _apply_batch_size, +) +from ray.data.context import DataContext +from ray.data.datasource import Connection, Datasink, FilenameProvider +from ray.data.iterator import DataIterator +from ray.data.random_access_dataset import RandomAccessDataset +from ray.types import ObjectRef +from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy +from ray.widgets import Template +from ray.widgets.util import repr_with_fallback + +if TYPE_CHECKING: + import dask + import mars + import modin + import pandas + import pyarrow + import pyspark + import tensorflow as tf + import torch + import torch.utils.data + from tensorflow_metadata.proto.v0 import schema_pb2 + + from ray.data._internal.execution.interfaces import Executor, NodeIdStr + from ray.data.grouped_data import GroupedData + + +logger = logging.getLogger(__name__) + +TensorflowFeatureTypeSpec = Union[ + "tf.TypeSpec", List["tf.TypeSpec"], Dict[str, "tf.TypeSpec"] +] + +TensorFlowTensorBatchType = Union["tf.Tensor", Dict[str, "tf.Tensor"]] + +CollatedData = TypeVar("CollatedData") +TorchBatchType = Union[Dict[str, "torch.Tensor"], CollatedData] + +BT_API_GROUP = "Basic Transformations" +SSR_API_GROUP = "Sorting, Shuffling and Repartitioning" +SMD_API_GROUP = "Splitting and Merging datasets" +GGA_API_GROUP = 
"Grouped and Global aggregations" +CD_API_GROUP = "Consuming Data" +IOC_API_GROUP = "I/O and Conversion" +IM_API_GROUP = "Inspecting Metadata" +E_API_GROUP = "Execution" + + +@PublicAPI +class Dataset: + """A Dataset is a distributed data collection for data loading and processing. + + Datasets are distributed pipelines that produce ``ObjectRef[Block]`` outputs, + where each block holds data in Arrow format, representing a shard of the overall + data collection. The block also determines the unit of parallelism. For more + details, see :ref:`Ray Data Internals `. + + Datasets can be created in multiple ways: from synthetic data via ``range_*()`` + APIs, from existing memory data via ``from_*()`` APIs (this creates a subclass + of Dataset called ``MaterializedDataset``), or from external storage + systems such as local disk, S3, HDFS etc. via the ``read_*()`` APIs. The + (potentially processed) Dataset can be saved back to external storage systems + via the ``write_*()`` APIs. + + Examples: + .. testcode:: + :skipif: True + + import ray + # Create dataset from synthetic data. + ds = ray.data.range(1000) + # Create dataset from in-memory data. + ds = ray.data.from_items( + [{"col1": i, "col2": i * 2} for i in range(1000)] + ) + # Create dataset from external storage system. + ds = ray.data.read_parquet("s3://bucket/path") + # Save dataset back to external storage system. + ds.write_csv("s3://bucket/output") + + Dataset has two kinds of operations: transformation, which takes in Dataset + and outputs a new Dataset (e.g. :py:meth:`.map_batches()`); and consumption, + which produces values (not a data stream) as output + (e.g. :meth:`.iter_batches()`). + + Dataset transformations are lazy, with execution of the transformations being + triggered by downstream consumption. 

    Dataset supports parallel processing at scale: transformations such as
    :py:meth:`.map_batches()`, aggregations such as
    :py:meth:`.min()`/:py:meth:`.max()`/:py:meth:`.mean()`, grouping via
    :py:meth:`.groupby()`, shuffling operations such as :py:meth:`.sort()`,
    :py:meth:`.random_shuffle()`, and :py:meth:`.repartition()`.

    Examples:
        >>> import ray
        >>> ds = ray.data.range(1000)
        >>> # Transform batches (Dict[str, np.ndarray]) with map_batches().
        >>> ds.map_batches(lambda batch: {"id": batch["id"] * 2})  # doctest: +ELLIPSIS
        MapBatches()
        +- Dataset(num_rows=1000, schema={id: int64})
        >>> # Compute the maximum.
        >>> ds.max("id")
        999
        >>> # Shuffle this dataset randomly.
        >>> ds.random_shuffle()  # doctest: +ELLIPSIS
        RandomShuffle
        +- Dataset(num_rows=1000, schema={id: int64})
        >>> # Sort it back in order.
        >>> ds.sort("id")  # doctest: +ELLIPSIS
        Sort
        +- Dataset(num_rows=1000, schema={id: int64})

    Both unexecuted and materialized Datasets can be passed between Ray tasks and
    actors without incurring a copy. Dataset supports conversion to/from several
    more featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and are also
    compatible with distributed TensorFlow / PyTorch.
    """

    def __init__(
        self,
        plan: ExecutionPlan,
        logical_plan: LogicalPlan,
    ):
        """Construct a Dataset (internal API).

        The constructor is not part of the Dataset API. Use the ``ray.data.*``
        read methods to construct a dataset.
        """
        assert isinstance(plan, ExecutionPlan), type(plan)
        usage_lib.record_library_usage("dataset")  # Legacy telemetry name.

        self._plan = plan
        self._logical_plan = logical_plan
        self._plan.link_logical_plan(logical_plan)

        # Handle to currently running executor for this dataset.
        self._current_executor: Optional["Executor"] = None
        self._write_ds = None

        self._set_uuid(StatsManager.get_dataset_id_from_stats_actor())

    @staticmethod
    def copy(
        ds: "Dataset", _deep_copy: bool = False, _as: Optional[type] = None
    ) -> "Dataset":
        """Return a copy of ``ds`` sharing the same logical plan (internal API).

        Args:
            ds: The dataset to copy.
            _deep_copy: If ``True``, deep-copy the execution plan; otherwise a
                shallow plan copy is made.
            _as: Optional class to construct the copy as; defaults to
                ``type(ds)``.
        """
        if not _as:
            _as = type(ds)
        if _deep_copy:
            return _as(ds._plan.deep_copy(), ds._logical_plan)
        else:
            return _as(ds._plan.copy(), ds._logical_plan)

    @PublicAPI(api_group=BT_API_GROUP)
    def map(
        self,
        fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]],
        *,
        compute: Optional[ComputeStrategy] = None,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        num_cpus: Optional[float] = None,
        num_gpus: Optional[float] = None,
        concurrency: Optional[Union[int, Tuple[int, int]]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        **ray_remote_args,
    ) -> "Dataset":
        """Apply the given function to each row of this dataset.

        Use this method to transform your data. To learn more, see
        :ref:`Transforming rows <transforming_rows>`.

        You can use either a function or a callable class to perform the transformation.
        For functions, Ray Data uses stateless Ray tasks. For classes, Ray Data uses
        stateful Ray actors. For more information, see
        :ref:`Stateful Transforms <stateful_transforms>`.

        .. tip::

            If your transformation is vectorized like most NumPy or pandas operations,
            :meth:`~Dataset.map_batches` might be faster.

        .. warning::
            Specifying both ``num_cpus`` and ``num_gpus`` for map tasks is experimental,
            and may result in scheduling or stability issues. Please
            `report any issues <https://github.com/ray-project/ray/issues/new/choose>`_
            to the Ray team.

        Examples:

            .. testcode::

                import os
                from typing import Any, Dict
                import ray

                def parse_filename(row: Dict[str, Any]) -> Dict[str, Any]:
                    row["filename"] = os.path.basename(row["path"])
                    return row

                ds = (
                    ray.data.read_images("s3://anonymous@ray-example-data/image-datasets/simple", include_paths=True)
                    .map(parse_filename)
                )
                print(ds.schema())

            .. testoutput::

                Column    Type
                ------    ----
                image     numpy.ndarray(shape=(32, 32, 3), dtype=uint8)
                path      string
                filename  string

        Time complexity: O(dataset size / parallelism)

        Args:
            fn: The function to apply to each row, or a class type
                that can be instantiated to create such a callable.
            compute: This argument is deprecated. Use ``concurrency`` argument.
            fn_args: Positional arguments to pass to ``fn`` after the first argument.
                These arguments are top-level arguments to the underlying Ray task.
            fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are
                top-level arguments to the underlying Ray task.
            fn_constructor_args: Positional arguments to pass to ``fn``'s constructor.
                You can only provide this if ``fn`` is a callable class. These arguments
                are top-level arguments in the underlying Ray actor construction task.
            fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor.
                This can only be provided if ``fn`` is a callable class. These arguments
                are top-level arguments in the underlying Ray actor construction task.
            num_cpus: The number of CPUs to reserve for each parallel map worker.
            num_gpus: The number of GPUs to reserve for each parallel map worker. For
                example, specify `num_gpus=1` to request 1 GPU for each parallel map
                worker.
            concurrency: The number of Ray workers to use concurrently. For a fixed-sized
                worker pool of size ``n``, specify ``concurrency=n``. For an autoscaling
                worker pool from ``m`` to ``n`` workers, specify ``concurrency=(m, n)``.
            ray_remote_args_fn: A function that returns a dictionary of remote args
                passed to each map worker. The purpose of this argument is to generate
                dynamic arguments for each actor/task, and will be called each time prior
                to initializing the worker. Args returned from this dict will always
                override the args in ``ray_remote_args``. Note: this is an advanced,
                experimental feature.
            ray_remote_args: Additional resource requirements to request from
                Ray for each map worker.

        .. seealso::

            :meth:`~Dataset.flat_map`
                Call this method to create new rows from existing ones. Unlike
                :meth:`~Dataset.map`, a function passed to
                :meth:`~Dataset.flat_map` can return multiple rows.

            :meth:`~Dataset.map_batches`
                Call this method to transform batches of data.
        """  # noqa: E501
        # Resolve tasks-vs-actors strategy from fn's type and `concurrency`
        # (also validates the deprecated `compute` argument).
        compute = get_compute_strategy(
            fn,
            fn_constructor_args=fn_constructor_args,
            compute=compute,
            concurrency=concurrency,
        )

        if num_cpus is not None:
            ray_remote_args["num_cpus"] = num_cpus

        if num_gpus is not None:
            ray_remote_args["num_gpus"] = num_gpus

        # Append a MapRows operator to a copy of the plan; this dataset is
        # left untouched (transformations are lazy and non-mutating).
        plan = self._plan.copy()
        map_op = MapRows(
            self._logical_plan.dag,
            fn,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )
        logical_plan = LogicalPlan(map_op, self.context)
        return Dataset(plan, logical_plan)

    def _set_name(self, name: Optional[str]):
        """Set the name of the dataset.

        Used as a prefix for metrics tags.
        """
        self._plan._dataset_name = name

    @property
    def _name(self) -> Optional[str]:
        """Returns the dataset name"""
        return self._plan._dataset_name

    @PublicAPI(api_group=BT_API_GROUP)
    def map_batches(
        self,
        fn: UserDefinedFunction[DataBatch, DataBatch],
        *,
        batch_size: Union[int, None, Literal["default"]] = "default",
        compute: Optional[ComputeStrategy] = None,
        batch_format: Optional[str] = "default",
        zero_copy_batch: bool = False,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        num_cpus: Optional[float] = None,
        num_gpus: Optional[float] = None,
        concurrency: Optional[Union[int, Tuple[int, int]]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        **ray_remote_args,
    ) -> "Dataset":
        """Apply the given function to batches of data.

        This method is useful for preprocessing data and performing inference. To learn
        more, see :ref:`Transforming batches <transforming_batches>`.

        You can use either a function or a callable class to perform the transformation.
        For functions, Ray Data uses stateless Ray tasks. For classes, Ray Data uses
        stateful Ray actors. For more information, see
        :ref:`Stateful Transforms <stateful_transforms>`.

        .. tip::
            To understand the format of the input to ``fn``, call :meth:`~Dataset.take_batch`
            on the dataset to get a batch in the same format as will be passed to ``fn``.

        .. tip::
            If ``fn`` doesn't mutate its input, set ``zero_copy_batch=True`` to improve
            performance and decrease memory utilization.

        .. warning::
            Specifying both ``num_cpus`` and ``num_gpus`` for map tasks is experimental,
            and may result in scheduling or stability issues. Please
            `report any issues <https://github.com/ray-project/ray/issues/new/choose>`_
            to the Ray team.

        Examples:

            Call :meth:`~Dataset.map_batches` to transform your data.

            .. testcode::

                from typing import Dict
                import numpy as np
                import ray

                def add_dog_years(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
                    batch["age_in_dog_years"] = 7 * batch["age"]
                    return batch

                ds = (
                    ray.data.from_items([
                        {"name": "Luna", "age": 4},
                        {"name": "Rory", "age": 14},
                        {"name": "Scout", "age": 9},
                    ])
                    .map_batches(add_dog_years)
                )
                ds.show()

            .. testoutput::

                {'name': 'Luna', 'age': 4, 'age_in_dog_years': 28}
                {'name': 'Rory', 'age': 14, 'age_in_dog_years': 98}
                {'name': 'Scout', 'age': 9, 'age_in_dog_years': 63}

            If your function returns large objects, yield outputs in chunks.

            .. testcode::

                from typing import Dict
                import ray
                import numpy as np

                def map_fn_with_large_output(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
                    for i in range(3):
                        yield {"large_output": np.ones((100, 1000))}

                ds = (
                    ray.data.from_items([1])
                    .map_batches(map_fn_with_large_output)
                )

            If you require stateful transformation,
            use Python callable class. Here is an example showing how to use stateful transforms to create model inference workers, without having to reload the model on each call.

            .. testcode::

                from typing import Dict
                import numpy as np
                import torch
                import ray

                class TorchPredictor:

                    def __init__(self):
                        self.model = torch.nn.Identity().cuda()
                        self.model.eval()

                    def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
                        inputs = torch.as_tensor(batch["data"], dtype=torch.float32).cuda()
                        with torch.inference_mode():
                            batch["output"] = self.model(inputs).detach().cpu().numpy()
                        return batch

                ds = (
                    ray.data.from_numpy(np.ones((32, 100)))
                    .map_batches(
                        TorchPredictor,
                        # Two workers with one GPU each
                        concurrency=2,
                        # Batch size is required if you're using GPUs.
                        batch_size=4,
                        num_gpus=1
                    )
                )

            To learn more, see
            :ref:`End-to-end: Offline Batch Inference <batch_inference_home>`.

        Args:
            fn: The function or generator to apply to a record batch, or a class type
                that can be instantiated to create such a callable. Note ``fn`` must be
                pickle-able.
            batch_size: The desired number of rows in each batch, or ``None`` to use
                entire blocks as batches (blocks may contain different numbers of rows).
                The actual size of the batch provided to ``fn`` may be smaller than
                ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent
                to a given map task. Default batch_size is 1024 with "default".
            compute: This argument is deprecated. Use ``concurrency`` argument.
            batch_format: If ``"default"`` or ``"numpy"``, batches are
                ``Dict[str, numpy.ndarray]``. If ``"pandas"``, batches are
                ``pandas.DataFrame``. If ``"pyarrow"``, batches are
                ``pyarrow.Table``.
            zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only
                batches. If this is ``True`` and no copy is required for the
                ``batch_format`` conversion, the batch is a zero-copy, read-only
                view on data in Ray's object store, which can decrease memory
                utilization and improve performance. If this is ``False``, the batch
                is writable, which requires an extra copy to guarantee.
                If ``fn`` mutates its input, this needs to be ``False`` in order to
                avoid "assignment destination is read-only" or "buffer source array is
                read-only" errors. Default is ``False``.
            fn_args: Positional arguments to pass to ``fn`` after the first argument.
                These arguments are top-level arguments to the underlying Ray task.
            fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are
                top-level arguments to the underlying Ray task.
            fn_constructor_args: Positional arguments to pass to ``fn``'s constructor.
                You can only provide this if ``fn`` is a callable class. These arguments
                are top-level arguments in the underlying Ray actor construction task.
            fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor.
                This can only be provided if ``fn`` is a callable class. These arguments
                are top-level arguments in the underlying Ray actor construction task.
            num_cpus: The number of CPUs to reserve for each parallel map worker.
            num_gpus: The number of GPUs to reserve for each parallel map worker. For
                example, specify `num_gpus=1` to request 1 GPU for each parallel map worker.
            concurrency: The number of Ray workers to use concurrently. For a fixed-sized
                worker pool of size ``n``, specify ``concurrency=n``. For an autoscaling
                worker pool from ``m`` to ``n`` workers, specify ``concurrency=(m, n)``.
            ray_remote_args_fn: A function that returns a dictionary of remote args
                passed to each map worker. The purpose of this argument is to generate
                dynamic arguments for each actor/task, and will be called each time prior
                to initializing the worker. Args returned from this dict will always
                override the args in ``ray_remote_args``. Note: this is an advanced,
                experimental feature.
            ray_remote_args: Additional resource requirements to request from
                ray for each map worker.

        .. note::

            The size of the batches provided to ``fn`` might be smaller than the
            specified ``batch_size`` if ``batch_size`` doesn't evenly divide the
            block(s) sent to a given map task.

            If ``batch_size`` is set and each input block is smaller than the
            ``batch_size``, Ray Data will bundle up many blocks as the input for one
            task, until their total size is equal to or greater than the given
            ``batch_size``.
            If ``batch_size`` is not set, the bundling will not be performed. Each task
            will receive only one input block.

        .. seealso::

            :meth:`~Dataset.iter_batches`
                Call this function to iterate over batches of data.

            :meth:`~Dataset.take_batch`
                Call this function to get a batch of data from the dataset
                in the same format as will be passed to the `fn` function of
                :meth:`~Dataset.map_batches`.

            :meth:`~Dataset.flat_map`
                Call this method to create new records from existing ones. Unlike
                :meth:`~Dataset.map`, a function passed to :meth:`~Dataset.flat_map`
                can return multiple records.

            :meth:`~Dataset.map`
                Call this method to transform one record at time.

        """  # noqa: E501
        # GPU tasks require an explicit batch size so users size batches to
        # fit device memory; "default" would pick one silently.
        use_gpus = num_gpus is not None and num_gpus > 0
        if use_gpus and (batch_size is None or batch_size == "default"):
            raise ValueError(
                "You must provide `batch_size` to `map_batches` when requesting GPUs. "
                "The optimal batch size depends on the model, data, and GPU used. "
                "We recommend using the largest batch size that doesn't result "
                "in your GPU device running out of memory. You can view the GPU memory "
                "usage via the Ray dashboard."
            )

        if isinstance(batch_size, int) and batch_size < 1:
            raise ValueError("Batch size can't be negative or 0")

        return self._map_batches_without_batch_size_validation(
            fn,
            batch_size=batch_size,
            compute=compute,
            batch_format=batch_format,
            zero_copy_batch=zero_copy_batch,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            concurrency=concurrency,
            ray_remote_args_fn=ray_remote_args_fn,
            **ray_remote_args,
        )

    def _map_batches_without_batch_size_validation(
        self,
        fn: UserDefinedFunction[DataBatch, DataBatch],
        *,
        batch_size: Union[int, None, Literal["default"]],
        compute: Optional[ComputeStrategy],
        batch_format: Optional[str],
        zero_copy_batch: bool,
        fn_args: Optional[Iterable[Any]],
        fn_kwargs: Optional[Dict[str, Any]],
        fn_constructor_args: Optional[Iterable[Any]],
        fn_constructor_kwargs: Optional[Dict[str, Any]],
        num_cpus: Optional[float],
        num_gpus: Optional[float],
        concurrency: Optional[Union[int, Tuple[int, int]]],
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]],
        **ray_remote_args,
    ):
        """Internal `map_batches` core that skips batch-size validation."""
        # NOTE: The `map_groups` implementation calls `map_batches` with
        # `batch_size=None`. The issue is that if you request GPUs with
        # `batch_size=None`, then `map_batches` raises a value error. So, to allow users
        # to call `map_groups` with GPUs, we need a separate method that doesn't
        # perform batch size validation.

        compute = get_compute_strategy(
            fn,
            fn_constructor_args=fn_constructor_args,
            compute=compute,
            concurrency=concurrency,
        )

        if num_cpus is not None:
            ray_remote_args["num_cpus"] = num_cpus

        if num_gpus is not None:
            ray_remote_args["num_gpus"] = num_gpus

        batch_format = _apply_batch_format(batch_format)

        min_rows_per_bundled_input = None
        if batch_size is not None and batch_size != "default":
            # Enable blocks bundling when batch_size is specified by caller.
            min_rows_per_bundled_input = batch_size
        batch_size = _apply_batch_size(batch_size)

        if batch_format not in VALID_BATCH_FORMATS:
            raise ValueError(
                f"The batch format must be one of {VALID_BATCH_FORMATS}, got: "
                f"{batch_format}"
            )

        plan = self._plan.copy()
        map_batches_op = MapBatches(
            self._logical_plan.dag,
            fn,
            batch_size=batch_size,
            batch_format=batch_format,
            zero_copy_batch=zero_copy_batch,
            min_rows_per_bundled_input=min_rows_per_bundled_input,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )
        logical_plan = LogicalPlan(map_batches_op, self.context)
        return Dataset(plan, logical_plan)

    @PublicAPI(api_group=BT_API_GROUP)
    def add_column(
        self,
        col: str,
        fn: Callable[
            [DataBatch],
            DataBatchColumn,
        ],
        *,
        batch_format: Optional[str] = "pandas",
        compute: Optional[str] = None,
        concurrency: Optional[Union[int, Tuple[int, int]]] = None,
        **ray_remote_args,
    ) -> "Dataset":
        """Add the given column to the dataset.
+
+ A function generating the new column values given the batch in pyarrow or pandas
+ format must be specified. This function must operate on batches of
+ `batch_format`.
+
+ Examples:
+
+
+ >>> import ray
+ >>> ds = ray.data.range(100)
+ >>> ds.schema()
+ Column Type
+ ------ ----
+ id int64
+
+ Add a new column equal to ``id * 2``.
+
+ >>> ds.add_column("new_id", lambda df: df["id"] * 2).schema()
+ Column Type
+ ------ ----
+ id int64
+ new_id int64
+
+ Time complexity: O(dataset size / parallelism)
+
+ Args:
+ col: Name of the column to add. Adding a column whose name already
+ exists raises a ``ValueError``.
+ fn: Map function generating the column values given a batch of
+ records in the given ``batch_format``.
+ batch_format: The format of the batches given to ``fn``. Must be one of
+ ``"pandas"``, ``"pyarrow"``, or ``"numpy"``. If ``"pandas"``, batches
+ are ``pandas.DataFrame``. If ``"pyarrow"``, batches are
+ ``pyarrow.Table``. If ``"numpy"``, batches are
+ ``Dict[str, numpy.ndarray]``.
+ compute: This argument is deprecated. Use ``concurrency`` argument.
+ concurrency: The number of Ray workers to use concurrently. For a
+ fixed-sized worker pool of size ``n``, specify ``concurrency=n``. For
+ an autoscaling worker pool from ``m`` to ``n`` workers, specify
+ ``concurrency=(m, n)``.
+ ray_remote_args: Additional resource requirements to request from
+ ray (e.g., num_gpus=1 to request GPUs for the map tasks). 
+ """
+ # Check that batch_format is one of the accepted formats.
+ accepted_batch_formats = ["pandas", "pyarrow", "numpy"]
+ if batch_format not in accepted_batch_formats:
+ raise ValueError(
+ f"batch_format argument must be one of {accepted_batch_formats}, "
+ f"got: {batch_format}"
+ )
+
+ def add_column(batch: DataBatch) -> DataBatch:
+ column = fn(batch)
+ if batch_format == "pandas":
+ import pandas as pd
+
+ assert isinstance(column, pd.Series), (
+ f"For pandas batch format, the function must return a pandas "
+ f"Series, got: {type(column)}"
+ )
+ if col in batch:
+ raise ValueError(
+ f"Trying to add an existing column with name" f" {col}"
+ )
+ batch.loc[:, col] = column
+ return batch
+ elif batch_format == "pyarrow":
+ import pyarrow as pa
+
+ assert isinstance(column, (pa.Array, pa.ChunkedArray)), (
+ f"For pyarrow batch format, the function must return a pyarrow "
+ f"Array, got: {type(column)}"
+ )
+ # Historically, this method was written for pandas batch format.
+ # To resolve https://github.com/ray-project/ray/issues/48090,
+ # we also allow pyarrow batch format which is preferred but would be
+ # a breaking change to enforce. 
+ + # For pyarrow, the index of the column will be -1 if it is missing in + # which case we'll want to append it + column_idx = batch.schema.get_field_index(col) + if column_idx == -1: + # Append the column to the table + return batch.append_column(col, column) + else: + raise ValueError( + f"Trying to add an existing column with name {col}" + ) + + else: + # batch format is assumed to be numpy since we checked at the + # beginning of the add_column function + assert isinstance(column, np.ndarray), ( + f"For numpy batch format, the function must return a " + f"numpy.ndarray, got: {type(column)}" + ) + if col in batch: + raise ValueError( + f"Trying to add an existing column with name" f" {col}" + ) + batch[col] = column + return batch + + if not callable(fn): + raise ValueError("`fn` must be callable, got {}".format(fn)) + + return self.map_batches( + add_column, + batch_format=batch_format, + compute=compute, + concurrency=concurrency, + zero_copy_batch=False, + **ray_remote_args, + ) + + @PublicAPI(api_group=BT_API_GROUP) + def drop_columns( + self, + cols: List[str], + *, + compute: Optional[str] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Drop one or more columns from the dataset. + + Examples: + + >>> import ray + >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet") + >>> ds.schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + petal.length double + petal.width double + variety string + >>> ds.drop_columns(["variety"]).schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + petal.length double + petal.width double + + Time complexity: O(dataset size / parallelism) + + Args: + cols: Names of the columns to drop. If any name does not exist, + an exception is raised. Column names must be unique. + compute: This argument is deprecated. Use ``concurrency`` argument. 
+ concurrency: The number of Ray workers to use concurrently. For a fixed-sized + worker pool of size ``n``, specify ``concurrency=n``. For an autoscaling + worker pool from ``m`` to ``n`` workers, specify ``concurrency=(m, n)``. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + """ # noqa: E501 + + if len(cols) != len(set(cols)): + raise ValueError(f"drop_columns expects unique column names, got: {cols}") + + def drop_columns(batch): + return batch.drop(cols) + + return self.map_batches( + drop_columns, + batch_format="pyarrow", + zero_copy_batch=True, + compute=compute, + concurrency=concurrency, + **ray_remote_args, + ) + + @PublicAPI(api_group=BT_API_GROUP) + def select_columns( + self, + cols: List[str], + *, + compute: Union[str, ComputeStrategy] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Select one or more columns from the dataset. + + Specified columns must be in the dataset schema. + + .. tip:: + If you're reading parquet files with :meth:`ray.data.read_parquet`, + you might be able to speed it up by using projection pushdown; see + :ref:`Parquet column pruning ` for details. + + Examples: + + >>> import ray + >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet") + >>> ds.schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + petal.length double + petal.width double + variety string + >>> ds.select_columns(["sepal.length", "sepal.width"]).schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + + Time complexity: O(dataset size / parallelism) + + Args: + cols: Names of the columns to select. If a name isn't in the + dataset schema, an exception is raised. Columns also should be unique. + compute: This argument is deprecated. Use ``concurrency`` argument. + concurrency: The number of Ray workers to use concurrently. 
For a fixed-sized
+ worker pool of size ``n``, specify ``concurrency=n``. For an autoscaling
+ worker pool from ``m`` to ``n`` workers, specify ``concurrency=(m, n)``.
+ ray_remote_args: Additional resource requirements to request from
+ ray (e.g., num_gpus=1 to request GPUs for the map tasks).
+ """ # noqa: E501
+
+ if not isinstance(cols, list):
+ raise ValueError(
+ "select_columns expected List[str], "
+ f"got {type(cols)} for input '{cols}'"
+ )
+
+ bad_input = [col for col in cols if not isinstance(col, str)]
+
+ if bad_input:
+ raise ValueError(
+ "select_columns expected List[str], "
+ f"got input type: {type(bad_input[0])} "
+ f"for input {cols}"
+ )
+
+ if len(cols) != len(set(cols)):
+ raise ValueError(
+ "select_columns expected unique column names, "
+ f"got duplicate column names: {cols}"
+ )
+
+ # NOTE: select_columns always runs with task-based compute; the deprecated
+ # ``compute`` argument is accepted for backward compatibility but ignored.
+ from ray.data._internal.compute import TaskPoolStrategy
+
+ compute = TaskPoolStrategy(size=concurrency)
+
+ plan = self._plan.copy()
+ select_op = Project(
+ self._logical_plan.dag,
+ cols=cols,
+ compute=compute,
+ ray_remote_args=ray_remote_args,
+ )
+ logical_plan = LogicalPlan(select_op, self.context)
+ return Dataset(plan, logical_plan)
+
+ @PublicAPI(api_group=BT_API_GROUP)
+ def rename_columns(
+ self,
+ names: Union[List[str], Dict[str, str]],
+ *,
+ concurrency: Optional[Union[int, Tuple[int, int]]] = None,
+ **ray_remote_args,
+ ):
+ """Rename columns in the dataset.
+
+ Examples:
+
+ >>> import ray
+ >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet")
+ >>> ds.schema()
+ Column Type
+ ------ ----
+ sepal.length double
+ sepal.width double
+ petal.length double
+ petal.width double
+ variety string
+
+ You can pass a dictionary mapping old column names to new column names. 
+
+ >>> ds.rename_columns({"variety": "category"}).schema()
+ Column Type
+ ------ ----
+ sepal.length double
+ sepal.width double
+ petal.length double
+ petal.width double
+ category string
+
+ Or you can pass a list of new column names.
+
+ >>> ds.rename_columns(
+ ... ["sepal_length", "sepal_width", "petal_length", "petal_width", "variety"]
+ ... ).schema()
+ Column Type
+ ------ ----
+ sepal_length double
+ sepal_width double
+ petal_length double
+ petal_width double
+ variety string
+
+ Args:
+ names: A dictionary that maps old column names to new column names, or a
+ list of new column names.
+ concurrency: The maximum number of Ray workers to use concurrently.
+ ray_remote_args: Additional resource requirements to request from
+ ray (e.g., num_gpus=1 to request GPUs for the map tasks).
+ """ # noqa: E501
+ if concurrency is not None and not isinstance(concurrency, int):
+ raise ValueError(
+ "Expected `concurrency` to be an integer or `None`, but got "
+ f"{concurrency}."
+ )
+
+ def rename_columns(batch: "pyarrow.Table") -> "pyarrow.Table":
+ # Versions of PyArrow before 17 don't support renaming columns with a dict. 
+ if isinstance(names, dict): + column_names_list = batch.column_names + for i, column_name in enumerate(column_names_list): + if column_name in names: + column_names_list[i] = names[column_name] + else: + column_names_list = names + + return batch.rename_columns(column_names_list) + + return self.map_batches( + rename_columns, + batch_format="pyarrow", + zero_copy_batch=True, + concurrency=concurrency, + **ray_remote_args, + ) + + @PublicAPI(api_group=BT_API_GROUP) + def flat_map( + self, + fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]], + *, + compute: Optional[ComputeStrategy] = None, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + num_cpus: Optional[float] = None, + num_gpus: Optional[float] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to each row and then flatten results. + + Use this method if your transformation returns multiple rows for each input + row. + + You can use either a function or a callable class to perform the transformation. + For functions, Ray Data uses stateless Ray tasks. For classes, Ray Data uses + stateful Ray actors. For more information, see + :ref:`Stateful Transforms `. + + .. tip:: + :meth:`~Dataset.map_batches` can also modify the number of rows. If your + transformation is vectorized like most NumPy and pandas operations, + it might be faster. + + .. warning:: + Specifying both ``num_cpus`` and ``num_gpus`` for map tasks is experimental, + and may result in scheduling or stability issues. Please + `report any issues `_ + to the Ray team. + + Examples: + + .. 
testcode:: + + from typing import Any, Dict, List + import ray + + def duplicate_row(row: Dict[str, Any]) -> List[Dict[str, Any]]: + return [row] * 2 + + print( + ray.data.range(3) + .flat_map(duplicate_row) + .take_all() + ) + + .. testoutput:: + + [{'id': 0}, {'id': 0}, {'id': 1}, {'id': 1}, {'id': 2}, {'id': 2}] + + Time complexity: O(dataset size / parallelism) + + Args: + fn: The function or generator to apply to each record, or a class type + that can be instantiated to create such a callable. + compute: This argument is deprecated. Use ``concurrency`` argument. + fn_args: Positional arguments to pass to ``fn`` after the first argument. + These arguments are top-level arguments to the underlying Ray task. + fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are + top-level arguments to the underlying Ray task. + fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. + You can only provide this if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor. + This can only be provided if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + num_cpus: The number of CPUs to reserve for each parallel map worker. + num_gpus: The number of GPUs to reserve for each parallel map worker. For + example, specify `num_gpus=1` to request 1 GPU for each parallel map + worker. + concurrency: The number of Ray workers to use concurrently. For a + fixed-sized worker pool of size ``n``, specify ``concurrency=n``. + For an autoscaling worker pool from ``m`` to ``n`` workers, specify + ``concurrency=(m, n)``. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. 
The purpose of this argument is to generate + dynamic arguments for each actor/task, and will be called each time + prior to initializing the worker. Args returned from this dict will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + ray_remote_args: Additional resource requirements to request from + ray for each map worker. + + .. seealso:: + + :meth:`~Dataset.map_batches` + Call this method to transform batches of data. + + :meth:`~Dataset.map` + Call this method to transform one row at time. + """ + compute = get_compute_strategy( + fn, + fn_constructor_args=fn_constructor_args, + compute=compute, + concurrency=concurrency, + ) + + if num_cpus is not None: + ray_remote_args["num_cpus"] = num_cpus + + if num_gpus is not None: + ray_remote_args["num_gpus"] = num_gpus + + plan = self._plan.copy() + op = FlatMap( + input_op=self._logical_plan.dag, + fn=fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(op, self.context) + return Dataset(plan, logical_plan) + + @PublicAPI(api_group=BT_API_GROUP) + def filter( + self, + fn: UserDefinedFunction[Dict[str, Any], bool], + *, + compute: Union[str, ComputeStrategy] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Filter out rows that don't satisfy the given predicate. + + You can use either a function or a callable class to perform the transformation. + For functions, Ray Data uses stateless Ray tasks. For classes, Ray Data uses + stateful Ray actors. For more information, see + :ref:`Stateful Transforms `. + + .. 
tip:: + If you can represent your predicate with NumPy or pandas operations, + :meth:`Dataset.map_batches` might be faster. You can implement filter by + dropping rows. + + .. tip:: + If you're reading parquet files with :meth:`ray.data.read_parquet`, + and the filter is a simple predicate, you might + be able to speed it up by using filter pushdown; see + :ref:`Parquet row pruning ` for details. + + Examples: + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.filter(lambda row: row["id"] % 2 == 0).take_all() + [{'id': 0}, {'id': 2}, {'id': 4}, ...] + + Time complexity: O(dataset size / parallelism) + + Args: + fn: The predicate to apply to each row, or a class type + that can be instantiated to create such a callable. + compute: This argument is deprecated. Use ``concurrency`` argument. + concurrency: The number of Ray workers to use concurrently. For a + fixed-sized worker pool of size ``n``, specify ``concurrency=n``. + For an autoscaling worker pool from ``m`` to ``n`` workers, specify + ``concurrency=(m, n)``. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. The purpose of this argument is to generate + dynamic arguments for each actor/task, and will be called each time + prior to initializing the worker. Args returned from this dict will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). 
+ """ + compute = get_compute_strategy( + fn, + compute=compute, + concurrency=concurrency, + ) + + plan = self._plan.copy() + op = Filter( + input_op=self._logical_plan.dag, + fn=fn, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(op, self.context) + return Dataset(plan, logical_plan) + + @AllToAllAPI + @PublicAPI(api_group=SSR_API_GROUP) + def repartition( + self, + num_blocks: int, + *, + shuffle: bool = False, + ) -> "Dataset": + """Repartition the :class:`Dataset` into exactly this number of :ref:`blocks `. + + This method can be useful to tune the performance of your pipeline. To learn + more, see :ref:`Advanced: Performance Tips and Tuning `. + + If you're writing data to files, you can also use this method to change the + number of output files. To learn more, see + :ref:`Changing the number of output files `. + + .. note:: + + Repartition has two modes. If ``shuffle=False``, Ray Data performs the + minimal data movement needed to equalize block sizes. Otherwise, Ray Data + performs a full distributed shuffle. + + .. image:: /data/images/dataset-shuffle.svg + :align: center + + .. + https://docs.google.com/drawings/d/132jhE3KXZsf29ho1yUdPrCHB9uheHBWHJhDQMXqIVPA/edit + + Examples: + >>> import ray + >>> ds = ray.data.range(100).repartition(10).materialize() + >>> ds.num_blocks() + 10 + + Time complexity: O(dataset size / parallelism) + + Args: + num_blocks: The number of blocks. + shuffle: Whether to perform a distributed shuffle during the + repartition. When shuffle is enabled, each output block + contains a subset of data rows from each input block, which + requires all-to-all data movement. When shuffle is disabled, + output blocks are created from adjacent input blocks, + minimizing data movement. + + Returns: + The repartitioned :class:`Dataset`. 
+ """ # noqa: E501
+ plan = self._plan.copy()
+ op = Repartition(
+ self._logical_plan.dag,
+ num_outputs=num_blocks,
+ shuffle=shuffle,
+ )
+ logical_plan = LogicalPlan(op, self.context)
+ return Dataset(plan, logical_plan)
+
+ @AllToAllAPI
+ @PublicAPI(api_group=SSR_API_GROUP)
+ def random_shuffle(
+ self,
+ *,
+ seed: Optional[int] = None,
+ num_blocks: Optional[int] = None,
+ **ray_remote_args,
+ ) -> "Dataset":
+ """Randomly shuffle the rows of this :class:`Dataset`.
+
+ .. tip::
+
+ This method can be slow. For better performance, try
+ :ref:`Iterating over batches with shuffling `.
+ Also, see :ref:`Optimizing shuffles `.
+
+ Examples:
+ >>> import ray
+ >>> ds = ray.data.range(100)
+ >>> ds.random_shuffle().take(3) # doctest: +SKIP
+ [{'id': 41}, {'id': 21}, {'id': 92}]
+ >>> ds.random_shuffle(seed=42).take(3) # doctest: +SKIP
+ [{'id': 77}, {'id': 21}, {'id': 63}]
+
+ Time complexity: O(dataset size / parallelism)
+
+ Args:
+ seed: Fix the random seed to use, otherwise one is chosen
+ based on system randomness.
+
+ Returns:
+ The shuffled :class:`Dataset`.
+ """ # noqa: E501
+
+ if num_blocks is not None:
+ raise DeprecationWarning(
+ "`num_blocks` parameter is deprecated in Ray 2.9. random_shuffle() "
+ "does not support changing the number of output blocks. Use "
+ "repartition() instead.",  # noqa: E501
+ )
+ plan = self._plan.copy()
+ op = RandomShuffle(
+ self._logical_plan.dag,
+ seed=seed,
+ ray_remote_args=ray_remote_args,
+ )
+ logical_plan = LogicalPlan(op, self.context)
+ return Dataset(plan, logical_plan)
+
+ @AllToAllAPI
+ @PublicAPI(api_group=SSR_API_GROUP)
+ def randomize_block_order(
+ self,
+ *,
+ seed: Optional[int] = None,
+ ) -> "Dataset":
+ """Randomly shuffle the :ref:`blocks ` of this :class:`Dataset`.
+
+ This method is useful if you :meth:`~Dataset.split` your dataset into shards and
+ want to randomize the data in each shard without performing a full
+ :meth:`~Dataset.random_shuffle`. 
+
+ Examples:
+ >>> import ray
+ >>> ds = ray.data.range(100)
+ >>> ds.take(5)
+ [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}]
+ >>> ds.randomize_block_order().take(5) # doctest: +SKIP
+ [{'id': 15}, {'id': 16}, {'id': 17}, {'id': 18}, {'id': 19}]
+
+ Args:
+ seed: Fix the random seed to use, otherwise one is chosen
+ based on system randomness.
+
+ Returns:
+ The block-shuffled :class:`Dataset`.
+ """ # noqa: E501
+
+ plan = self._plan.copy()
+ op = RandomizeBlocks(
+ self._logical_plan.dag,
+ seed=seed,
+ )
+ logical_plan = LogicalPlan(op, self.context)
+ return Dataset(plan, logical_plan)
+
+ @PublicAPI(api_group=BT_API_GROUP)
+ def random_sample(
+ self, fraction: float, *, seed: Optional[int] = None
+ ) -> "Dataset":
+ """Returns a new :class:`Dataset` containing a random fraction of the rows.
+
+ .. note::
+
+ This method returns roughly ``fraction * total_rows`` rows. An exact number
+ of rows isn't guaranteed.
+
+ Examples:
+ >>> import ray
+ >>> ds = ray.data.range(100)
+ >>> ds.random_sample(0.1).count() # doctest: +SKIP
+ 10
+
+ Args:
+ fraction: The fraction of elements to sample.
+ seed: Seeds the Python pseudo-random number generator used for sampling.
+
+ Returns:
+ Returns a :class:`Dataset` containing the sampled rows. 
+ """ + import random + + import pandas as pd + import pyarrow as pa + + if self._plan.initial_num_blocks() == 0: + raise ValueError("Cannot sample from an empty Dataset.") + + if fraction < 0 or fraction > 1: + raise ValueError("Fraction must be between 0 and 1.") + + if seed is not None: + random.seed(seed) + + def random_sample(batch): + if isinstance(batch, list): + return [row for row in batch if random.random() <= fraction] + if isinstance(batch, pa.Table): + # Lets the item pass if weight generated for that item <= fraction + return batch.filter( + pa.array(random.random() <= fraction for _ in range(len(batch))) + ) + if isinstance(batch, pd.DataFrame): + return batch.sample(frac=fraction) + if isinstance(batch, np.ndarray): + return _create_possibly_ragged_ndarray( + [row for row in batch if random.random() <= fraction] + ) + raise ValueError(f"Unsupported batch type: {type(batch)}") + + return self.map_batches(random_sample, batch_format=None) + + @ConsumptionAPI + @PublicAPI(api_group=SMD_API_GROUP) + def streaming_split( + self, + n: int, + *, + equal: bool = False, + locality_hints: Optional[List["NodeIdStr"]] = None, + ) -> List[DataIterator]: + """Returns ``n`` :class:`DataIterators ` that can + be used to read disjoint subsets of the dataset in parallel. + + This method is the recommended way to consume :class:`Datasets ` for + distributed training. + + Streaming split works by delegating the execution of this :class:`Dataset` to a + coordinator actor. The coordinator pulls block references from the executed + stream, and divides those blocks among ``n`` output iterators. Iterators pull + blocks from the coordinator actor to return to their caller on ``next``. + + The returned iterators are also repeatable; each iteration will trigger a + new execution of the Dataset. There is an implicit barrier at the start of + each iteration, which means that `next` must be called on all iterators before + the iteration starts. + + .. 
warning:: + + Because iterators are pulling blocks from the same :class:`Dataset` + execution, if one iterator falls behind, other iterators may be stalled. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.range(100) + it1, it2 = ds.streaming_split(2, equal=True) + + Consume data from iterators in parallel. + + .. testcode:: + + @ray.remote + def consume(it): + for batch in it.iter_batches(): + pass + + ray.get([consume.remote(it1), consume.remote(it2)]) + + You can loop over the iterators multiple times (multiple epochs). + + .. testcode:: + + @ray.remote + def train(it): + NUM_EPOCHS = 2 + for _ in range(NUM_EPOCHS): + for batch in it.iter_batches(): + pass + + ray.get([train.remote(it1), train.remote(it2)]) + + The following remote function call blocks waiting for a read on ``it2`` to + start. + + .. testcode:: + :skipif: True + + ray.get(train.remote(it1)) + + Args: + n: Number of output iterators to return. + equal: If ``True``, each output iterator sees an exactly equal number + of rows, dropping data if necessary. If ``False``, some iterators may + see slightly more or less rows than others, but no data is dropped. + locality_hints: Specify the node ids corresponding to each iterator + location. Dataset will try to minimize data movement based on the + iterator output locations. This list must have length ``n``. You can + get the current node id of a task or actor by calling + ``ray.get_runtime_context().get_node_id()``. + + Returns: + The output iterator splits. These iterators are Ray-serializable and can + be freely passed to any Ray task or actor. + + .. seealso:: + + :meth:`Dataset.split` + Unlike :meth:`~Dataset.streaming_split`, :meth:`~Dataset.split` + materializes the dataset in memory. 
+ """ + return StreamSplitDataIterator.create(self, n, equal, locality_hints) + + @ConsumptionAPI + @PublicAPI(api_group=SMD_API_GROUP) + def split( + self, n: int, *, equal: bool = False, locality_hints: Optional[List[Any]] = None + ) -> List["MaterializedDataset"]: + """Materialize and split the dataset into ``n`` disjoint pieces. + + This method returns a list of ``MaterializedDataset`` that can be passed to Ray + Tasks and Actors and used to read the dataset rows in parallel. + + Examples: + + .. testcode:: + + @ray.remote + class Worker: + + def train(self, data_iterator): + for batch in data_iterator.iter_batches(batch_size=8): + pass + + workers = [Worker.remote() for _ in range(4)] + shards = ray.data.range(100).split(n=4, equal=True) + ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) + + Time complexity: O(1) + + Args: + n: Number of child datasets to return. + equal: Whether to guarantee each split has an equal + number of records. This might drop records if the rows can't be + divided equally among the splits. + locality_hints: [Experimental] A list of Ray actor handles of size ``n``. + The system tries to co-locate the blocks of the i-th dataset + with the i-th actor to maximize data locality. + + Returns: + A list of ``n`` disjoint dataset splits. + + .. seealso:: + + :meth:`Dataset.split_at_indices` + Unlike :meth:`~Dataset.split`, which splits a dataset into approximately + equal splits, :meth:`Dataset.split_proportionately` lets you split a + dataset into different sizes. + + :meth:`Dataset.split_proportionately` + This method is equivalent to :meth:`Dataset.split_at_indices` if + you compute indices manually. + + :meth:`Dataset.streaming_split`. + Unlike :meth:`~Dataset.split`, :meth:`~Dataset.streaming_split` + doesn't materialize the dataset in memory. + """ + if n <= 0: + raise ValueError(f"The number of splits {n} is not positive.") + + # fallback to split_at_indices for equal split without locality hints. 
+ # simple benchmarks show split_at_indices yields more stable performance.
+ # https://github.com/ray-project/ray/pull/26641 for more context.
+ if equal and locality_hints is None:
+ count = self.count()
+ split_index = count // n
+ # we are creating n split_indices which will generate
+ # n + 1 splits; the last split will contain at most (n - 1)
+ # rows, which could be safely dropped.
+ split_indices = [split_index * i for i in range(1, n + 1)]
+ shards = self.split_at_indices(split_indices)
+ return shards[:n]
+
+ if locality_hints and len(locality_hints) != n:
+ raise ValueError(
+ f"The length of locality_hints {len(locality_hints)} "
+ f"doesn't equal the number of splits {n}."
+ )
+
+ bundle = self._plan.execute()
+ # We should not free blocks since we will materialize the Datasets.
+ owned_by_consumer = False
+ stats = self._plan.stats()
+ block_refs, metadata = zip(*bundle.blocks)
+
+ if locality_hints is None:
+ block_refs_splits = np.array_split(block_refs, n)
+ metadata_splits = np.array_split(metadata, n)
+
+ split_datasets = []
+ for block_refs_split, metadata_split in zip(
+ block_refs_splits, metadata_splits
+ ):
+ ref_bundles = [
+ RefBundle([(b, m)], owns_blocks=owned_by_consumer)
+ for b, m in zip(block_refs_split, metadata_split)
+ ]
+ logical_plan = LogicalPlan(
+ InputData(input_data=ref_bundles), self.context
+ )
+ split_datasets.append(
+ MaterializedDataset(
+ ExecutionPlan(stats),
+ logical_plan,
+ )
+ )
+ return split_datasets
+
+ metadata_mapping = dict(zip(block_refs, metadata))
+
+ # If the locality_hints is set, we use a two-round greedy algorithm
+ # to co-locate the blocks with the actors based on block
+ # and actor's location (node_id).
+ #
+ # The split algorithm tries to allocate equally-sized blocks regardless
+ # of locality. Thus we first calculate the expected number of blocks
+ # for each split. 
+ #
+ # In the first round, for each actor, we look for all blocks that
+ # match the actor's node_id, then allocate those matched blocks to
+ # this actor until we reach the limit (expected number).
+ #
+ # In the second round: fill each actor's allocation with
+ # remaining unallocated blocks until we reach the limit.
+
+ def build_allocation_size_map(
+ num_blocks: int, actors: List[Any]
+ ) -> Dict[Any, int]:
+ """Given the total number of blocks and a list of actors, calculate
+ the expected number of blocks to allocate for each actor.
+ """
+ num_actors = len(actors)
+ num_blocks_per_actor = num_blocks // num_actors
+ num_blocks_left = num_blocks - num_blocks_per_actor * n
+ num_blocks_by_actor = {}
+ for i, actor in enumerate(actors):
+ num_blocks_by_actor[actor] = num_blocks_per_actor
+ if i < num_blocks_left:
+ num_blocks_by_actor[actor] += 1
+ return num_blocks_by_actor
+
+ def build_block_refs_by_node_id(
+ blocks: List[ObjectRef[Block]],
+ ) -> Dict[str, List[ObjectRef[Block]]]:
+ """Build the reverse index from node_id to block_refs. For
+ simplicity, if the block is stored on multiple nodes we
+ only pick the first one. 
+ """ + block_ref_locations = ray.experimental.get_object_locations(blocks) + block_refs_by_node_id = collections.defaultdict(list) + for block_ref in blocks: + node_ids = block_ref_locations.get(block_ref, {}).get("node_ids", []) + node_id = node_ids[0] if node_ids else None + block_refs_by_node_id[node_id].append(block_ref) + return block_refs_by_node_id + + def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]: + """Build a map from a actor to its node_id.""" + actors_state = ray._private.state.actors() + return { + actor: actors_state.get(actor._actor_id.hex(), {}) + .get("Address", {}) + .get("NodeID") + for actor in actors + } + + # expected number of blocks to be allocated for each actor + expected_block_count_by_actor = build_allocation_size_map( + len(block_refs), locality_hints + ) + # the reverse index from node_id to block_refs + block_refs_by_node_id = build_block_refs_by_node_id(block_refs) + # the map from actor to its node_id + node_id_by_actor = build_node_id_by_actor(locality_hints) + + allocation_per_actor = collections.defaultdict(list) + + # In the first round, for each actor, we look for all blocks that + # match the actor's node_id, then allocate those matched blocks to + # this actor until we reach the limit(expected number) + for actor in locality_hints: + node_id = node_id_by_actor[actor] + matching_blocks = block_refs_by_node_id[node_id] + expected_block_count = expected_block_count_by_actor[actor] + allocation = [] + while matching_blocks and len(allocation) < expected_block_count: + allocation.append(matching_blocks.pop()) + allocation_per_actor[actor] = allocation + + # In the second round: fill each actor's allocation with + # remaining unallocated blocks until we reach the limit + remaining_block_refs = list( + itertools.chain.from_iterable(block_refs_by_node_id.values()) + ) + for actor in locality_hints: + while ( + len(allocation_per_actor[actor]) < expected_block_count_by_actor[actor] + ): + 
allocation_per_actor[actor].append(remaining_block_refs.pop()) + + assert len(remaining_block_refs) == 0, len(remaining_block_refs) + + per_split_bundles = [] + for actor in locality_hints: + blocks = allocation_per_actor[actor] + metadata = [metadata_mapping[b] for b in blocks] + bundle = RefBundle( + tuple(zip(blocks, metadata)), owns_blocks=owned_by_consumer + ) + per_split_bundles.append(bundle) + + if equal: + # equalize the splits + per_split_bundles = _equalize(per_split_bundles, owned_by_consumer) + + split_datasets = [] + for bundle in per_split_bundles: + logical_plan = LogicalPlan(InputData(input_data=[bundle]), self.context) + split_datasets.append( + MaterializedDataset( + ExecutionPlan(stats), + logical_plan, + ) + ) + return split_datasets + + @ConsumptionAPI + @PublicAPI(api_group=SMD_API_GROUP) + def split_at_indices(self, indices: List[int]) -> List["MaterializedDataset"]: + """Materialize and split the dataset at the given indices (like ``np.split``). + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> d1, d2, d3 = ds.split_at_indices([2, 5]) + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4])} + >>> d3.take_batch() + {'id': array([5, 6, 7, 8, 9])} + + Time complexity: O(num splits) + + Args: + indices: List of sorted integers which indicate where the dataset + are split. If an index exceeds the length of the dataset, + an empty dataset is returned. + + Returns: + The dataset splits. + + .. seealso:: + + :meth:`Dataset.split` + Unlike :meth:`~Dataset.split_at_indices`, which lets you split a + dataset into different sizes, :meth:`Dataset.split` splits a dataset + into approximately equal splits. + + :meth:`Dataset.split_proportionately` + This method is equivalent to :meth:`Dataset.split_at_indices` if + you compute indices manually. + + :meth:`Dataset.streaming_split`. + Unlike :meth:`~Dataset.split`, :meth:`~Dataset.streaming_split` + doesn't materialize the dataset in memory. 
+ """ + + if len(indices) < 1: + raise ValueError("indices must be at least of length 1") + if sorted(indices) != indices: + raise ValueError("indices must be sorted") + if indices[0] < 0: + raise ValueError("indices must be positive") + start_time = time.perf_counter() + bundle = self._plan.execute() + blocks, metadata = _split_at_indices( + bundle.blocks, + indices, + False, + ) + split_duration = time.perf_counter() - start_time + parent_stats = self._plan.stats() + splits = [] + + for bs, ms in zip(blocks, metadata): + stats = DatasetStats(metadata={"Split": ms}, parent=parent_stats) + stats.time_total_s = split_duration + ref_bundles = [ + RefBundle([(b, m)], owns_blocks=False) for b, m in zip(bs, ms) + ] + logical_plan = LogicalPlan(InputData(input_data=ref_bundles), self.context) + + splits.append( + MaterializedDataset( + ExecutionPlan(stats), + logical_plan, + ) + ) + return splits + + @ConsumptionAPI + @PublicAPI(api_group=SMD_API_GROUP) + def split_proportionately( + self, proportions: List[float] + ) -> List["MaterializedDataset"]: + """Materialize and split the dataset using proportions. + + A common use case for this is splitting the dataset into train + and test sets (equivalent to eg. scikit-learn's ``train_test_split``). + For a higher level abstraction, see :meth:`Dataset.train_test_split`. + + This method splits datasets so that all splits + always contains at least one row. If that isn't possible, + an exception is raised. + + This is equivalent to caulculating the indices manually and calling + :meth:`Dataset.split_at_indices`. + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> d1, d2, d3 = ds.split_proportionately([0.2, 0.5]) + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4, 5, 6])} + >>> d3.take_batch() + {'id': array([7, 8, 9])} + + Time complexity: O(num splits) + + Args: + proportions: List of proportions to split the dataset according to. 
+ Must sum up to less than 1, and each proportion must be bigger + than 0. + + Returns: + The dataset splits. + + .. seealso:: + + :meth:`Dataset.split` + Unlike :meth:`~Dataset.split_proportionately`, which lets you split a + dataset into different sizes, :meth:`Dataset.split` splits a dataset + into approximately equal splits. + + :meth:`Dataset.split_at_indices` + :meth:`Dataset.split_proportionately` uses this method under the hood. + + :meth:`Dataset.streaming_split`. + Unlike :meth:`~Dataset.split`, :meth:`~Dataset.streaming_split` + doesn't materialize the dataset in memory. + """ + + if len(proportions) < 1: + raise ValueError("proportions must be at least of length 1") + if sum(proportions) >= 1: + raise ValueError("proportions must sum to less than 1") + if any(p <= 0 for p in proportions): + raise ValueError("proportions must be bigger than 0") + + dataset_length = self.count() + cumulative_proportions = np.cumsum(proportions) + split_indices = [ + int(dataset_length * proportion) for proportion in cumulative_proportions + ] + + # Ensure each split has at least one element + subtract = 0 + for i in range(len(split_indices) - 2, -1, -1): + split_indices[i] -= subtract + if split_indices[i] == split_indices[i + 1]: + subtract += 1 + split_indices[i] -= 1 + if any(i <= 0 for i in split_indices): + raise ValueError( + "Couldn't create non-empty splits with the given proportions." + ) + + return self.split_at_indices(split_indices) + + @ConsumptionAPI + @PublicAPI(api_group=SMD_API_GROUP) + def train_test_split( + self, + test_size: Union[int, float], + *, + shuffle: bool = False, + seed: Optional[int] = None, + ) -> Tuple["MaterializedDataset", "MaterializedDataset"]: + """Materialize and split the dataset into train and test subsets. 
+ + Examples: + + >>> import ray + >>> ds = ray.data.range(8) + >>> train, test = ds.train_test_split(test_size=0.25) + >>> train.take_batch() + {'id': array([0, 1, 2, 3, 4, 5])} + >>> test.take_batch() + {'id': array([6, 7])} + + Args: + test_size: If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the test split. If int, + represents the absolute number of test samples. The train split + always complements the test split. + shuffle: Whether or not to globally shuffle the dataset before splitting. + Defaults to ``False``. This may be a very expensive operation with a + large dataset. + seed: Fix the random seed to use for shuffle, otherwise one is chosen + based on system randomness. Ignored if ``shuffle=False``. + + Returns: + Train and test subsets as two ``MaterializedDatasets``. + + .. seealso:: + + :meth:`Dataset.split_proportionately` + """ + ds = self + + if shuffle: + ds = ds.random_shuffle(seed=seed) + + if not isinstance(test_size, (int, float)): + raise TypeError(f"`test_size` must be int or float got {type(test_size)}.") + if isinstance(test_size, float): + if test_size <= 0 or test_size >= 1: + raise ValueError( + "If `test_size` is a float, it must be bigger than 0 and smaller " + f"than 1. Got {test_size}." + ) + return ds.split_proportionately([1 - test_size]) + else: + ds_length = ds.count() + if test_size <= 0 or test_size >= ds_length: + raise ValueError( + "If `test_size` is an int, it must be bigger than 0 and smaller " + f"than the size of the dataset ({ds_length}). " + f"Got {test_size}." + ) + return ds.split_at_indices([ds_length - test_size]) + + @PublicAPI(api_group=SMD_API_GROUP) + def union(self, *other: List["Dataset"]) -> "Dataset": + """Concatenate :class:`Datasets ` across rows. + + The order of the blocks in the datasets is preserved, as is the + relative ordering between the datasets passed in the argument list. + + .. caution:: + Unioned datasets aren't lineage-serializable. 
As a result, they can't be + used as a tunable hyperparameter in Ray Tune. + + Examples: + + >>> import ray + >>> ds1 = ray.data.range(2) + >>> ds2 = ray.data.range(3) + >>> ds1.union(ds2).take_all() + [{'id': 0}, {'id': 1}, {'id': 0}, {'id': 1}, {'id': 2}] + + Args: + other: List of datasets to combine with this one. The datasets + must have the same schema as this dataset, otherwise the + behavior is undefined. + + Returns: + A new dataset holding the rows of the input datasets. + """ + start_time = time.perf_counter() + + datasets = [self] + list(other) + logical_plans = [union_ds._plan._logical_plan for union_ds in datasets] + op = UnionLogicalOperator( + *[plan.dag for plan in logical_plans], + ) + logical_plan = LogicalPlan(op, self.context) + + stats = DatasetStats( + metadata={"Union": []}, + parent=[d._plan.stats() for d in datasets], + ) + stats.time_total_s = time.perf_counter() - start_time + return Dataset( + ExecutionPlan(stats), + logical_plan, + ) + + @AllToAllAPI + @PublicAPI(api_group=GGA_API_GROUP) + def groupby( + self, + key: Union[str, List[str], None], + ) -> "GroupedData": + """Group rows of a :class:`Dataset` according to a column. + + Use this method to transform data based on a + categorical variable. + + Examples: + + .. testcode:: + + import pandas as pd + import ray + + def normalize_variety(group: pd.DataFrame) -> pd.DataFrame: + for feature in group.drop("variety").columns: + group[feature] = group[feature] / group[feature].abs().max() + return group + + ds = ( + ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet") + .groupby("variety") + .map_groups(normalize_variety, batch_format="pandas") + ) + + Time complexity: O(dataset size * log(dataset size / parallelism)) + + Args: + key: A column name or list of column names. + If this is ``None``, place all rows in a single group. + + Returns: + A lazy :class:`~ray.data.grouped_data.GroupedData`. + + .. 
seealso:: + + :meth:`~ray.data.grouped_data.GroupedData.map_groups` + Call this method to transform groups of data. + """ + from ray.data.grouped_data import GroupedData + + # Always allow None since groupby interprets that as grouping all + # records into a single global group. + if key is not None: + # Fetching the schema can trigger execution, so don't fetch it for + # input validation. + SortKey(key).validate_schema(self.schema(fetch_if_missing=False)) + + return GroupedData(self, key) + + @AllToAllAPI + @PublicAPI(api_group=GGA_API_GROUP) + def unique(self, column: str) -> List[Any]: + """List the unique elements in a given column. + + Examples: + + >>> import ray + >>> ds = ray.data.from_items([1, 2, 3, 2, 3]) + >>> ds.unique("item") + [1, 2, 3] + + This function is very useful for computing labels + in a machine learning dataset: + + >>> import ray + >>> ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv") + >>> ds.unique("target") + [0, 1, 2] + + One common use case is to convert the class labels + into integers for training and inference: + + >>> classes = {0: 'Setosa', 1: 'Versicolor', 2: 'Virginica'} + >>> def preprocessor(df, classes): + ... df["variety"] = df["target"].map(classes) + ... return df + >>> train_ds = ds.map_batches( + ... preprocessor, fn_kwargs={"classes": classes}, batch_format="pandas") + >>> train_ds.sort("sepal length (cm)").take(1) # Sort to make it deterministic + [{'sepal length (cm)': 4.3, ..., 'variety': 'Setosa'}] + + Time complexity: O(dataset size * log(dataset size / parallelism)) + + Args: + column: The column to collect unique elements over. + + Returns: + A list with unique elements in the given column. 
+ """ # noqa: E501 + ds = self.select_columns([column]).groupby(column).count() + return [item[column] for item in ds.take_all()] + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def aggregate(self, *aggs: AggregateFn) -> Union[Any, Dict[str, Any]]: + """Aggregate values using one or more functions. + + Use this method to compute metrics like the product of a column. + + Examples: + + .. testcode:: + + import ray + from ray.data.aggregate import AggregateFn + + ds = ray.data.from_items([{"number": i} for i in range(1, 10)]) + aggregation = AggregateFn( + init=lambda column: 1, + # Apply this to each row to produce a partial aggregate result + accumulate_row=lambda a, row: a * row["number"], + # Apply this to merge partial aggregate results into a final result + merge=lambda a1, a2: a1 * a2, + name="prod" + ) + print(ds.aggregate(aggregation)) + + .. testoutput:: + + {'prod': 362880} + + Time complexity: O(dataset size / parallelism) + + Args: + *aggs: :class:`Aggregations ` to perform. + + Returns: + A ``dict`` where each each value is an aggregation for a given column. + """ + ret = self.groupby(None).aggregate(*aggs).take(1) + return ret[0] if len(ret) > 0 else None + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def sum( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute the sum of one or more columns. + + Examples: + >>> import ray + >>> ray.data.range(100).sum("id") + 4950 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100) + ... ]).sum(["A", "B"]) + {'sum(A)': 4950, 'sum(B)': 328350} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values are ignored when computing the sum. If ``False``, + when a null value is encountered, the output is ``None``. 
+ Ray Data considers ``np.nan``, ``None``, and ``pd.NaT`` to be null + values. Default is ``True``. + + Returns: + The sum result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dict containing the column-wise sum of all + columns, + - ``on="col"``: a scalar representing the sum of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column ``dict`` + containing the column-wise sum of the provided columns. + + If the dataset is empty, all values are null. If ``ignore_nulls`` is + ``False`` and any value is null, then the output is ``None``. + """ + ret = self._aggregate_on(Sum, on, ignore_nulls) + return self._aggregate_result(ret) + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def min( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Return the minimum of one or more columns. + + Examples: + >>> import ray + >>> ray.data.range(100).min("id") + 0 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100) + ... ]).min(["A", "B"]) + {'min(A)': 0, 'min(B)': 0} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values are ignored when computing the min; if ``False``, + when a null value is encountered, the output is ``None``. + This method considers ``np.nan``, ``None``, and ``pd.NaT`` to be null + values. Default is ``True``. + + Returns: + The min result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise min of + all columns, + - ``on="col"``: a scalar representing the min of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise min of the provided columns. + + If the dataset is empty, all values are null. If ``ignore_nulls`` is + ``False`` and any value is null, then the output is ``None``. 
+ """ + ret = self._aggregate_on(Min, on, ignore_nulls) + return self._aggregate_result(ret) + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def max( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Return the maximum of one or more columns. + + Examples: + >>> import ray + >>> ray.data.range(100).max("id") + 99 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100) + ... ]).max(["A", "B"]) + {'max(A)': 99, 'max(B)': 9801} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values are ignored when computing the max; if ``False``, + when a null value is encountered, the output is ``None``. + This method considers ``np.nan``, ``None``, and ``pd.NaT`` to be null + values. Default is ``True``. + + Returns: + The max result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise max of + all columns, + - ``on="col"``: a scalar representing the max of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise max of the provided columns. + + If the dataset is empty, all values are null. If ``ignore_nulls`` is + ``False`` and any value is null, then the output is ``None``. + """ + ret = self._aggregate_on(Max, on, ignore_nulls) + return self._aggregate_result(ret) + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def mean( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute the mean of one or more columns. + + Examples: + >>> import ray + >>> ray.data.range(100).mean("id") + 49.5 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100) + ... 
]).mean(["A", "B"]) + {'mean(A)': 49.5, 'mean(B)': 3283.5} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values are ignored when computing the mean; if ``False``, + when a null value is encountered, the output is ``None``. + This method considers ``np.nan``, ``None``, and ``pd.NaT`` to be null + values. Default is ``True``. + + Returns: + The mean result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise mean of + all columns, + - ``on="col"``: a scalar representing the mean of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise mean of the provided columns. + + If the dataset is empty, all values are null. If ``ignore_nulls`` is + ``False`` and any value is null, then the output is ``None``. + """ + ret = self._aggregate_on(Mean, on, ignore_nulls) + return self._aggregate_result(ret) + + @AllToAllAPI + @ConsumptionAPI + @PublicAPI(api_group=GGA_API_GROUP) + def std( + self, + on: Optional[Union[str, List[str]]] = None, + ddof: int = 1, + ignore_nulls: bool = True, + ) -> Union[Any, Dict[str, Any]]: + """Compute the standard deviation of one or more columns. + + .. note:: + This method uses Welford's online method for an accumulator-style + computation of the standard deviation. This method has + numerical stability, and is computable in a single pass. This may give + different (but more accurate) results than NumPy, Pandas, and sklearn, which + use a less numerically stable two-pass algorithm. + To learn more, see + `the Wikapedia article `_. + + Examples: + >>> import ray + >>> round(ray.data.range(100).std("id", ddof=0), 5) + 28.86607 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100) + ... 
]).std(["A", "B"]) + {'std(A)': 29.011491975882016, 'std(B)': 2968.1748039269296} + + Args: + on: a column name or a list of column names to aggregate. + ddof: Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + ignore_nulls: Whether to ignore null values. If ``True``, null + values are ignored when computing the std; if ``False``, + when a null value is encountered, the output is ``None``. + This method considers ``np.nan``, ``None``, and ``pd.NaT`` to be null + values. Default is ``True``. + + Returns: + The standard deviation result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise std of + all columns, + - ``on="col"``: a scalar representing the std of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise std of the provided columns. + + If the dataset is empty, all values are null. If ``ignore_nulls`` is + ``False`` and any value is null, then the output is ``None``. + """ # noqa: E501 + ret = self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) + return self._aggregate_result(ret) + + @AllToAllAPI + @PublicAPI(api_group=SSR_API_GROUP) + def sort( + self, + key: Union[str, List[str], None] = None, + descending: Union[bool, List[bool]] = False, + boundaries: List[Union[int, float]] = None, + ) -> "Dataset": + """Sort the dataset by the specified key column or key function. + + .. note:: + The `descending` parameter must be a boolean, or a list of booleans. + If it is a list, all items in the list must share the same direction. + Multi-directional sort is not supported yet. + + Examples: + >>> import ray + >>> ds = ray.data.range(15) + >>> ds = ds.sort("id", descending=False, boundaries=[5, 10]) + >>> for df in ray.get(ds.to_pandas_refs()): + ... 
print(df) + id + 0 0 + 1 1 + 2 2 + 3 3 + 4 4 + id + 0 5 + 1 6 + 2 7 + 3 8 + 4 9 + id + 0 10 + 1 11 + 2 12 + 3 13 + 4 14 + + Time complexity: O(dataset size * log(dataset size / parallelism)) + + Args: + key: The column or a list of columns to sort by. + descending: Whether to sort in descending order. Must be a boolean or a list + of booleans matching the number of the columns. + boundaries: The list of values based on which to repartition the dataset. + For example, if the input boundary is [10,20], rows with values less + than 10 will be divided into the first block, rows with values greater + than or equal to 10 and less than 20 will be divided into the + second block, and rows with values greater than or equal to 20 + will be divided into the third block. If not provided, the + boundaries will be sampled from the input blocks. This feature + only supports numeric columns right now. + + Returns: + A new, sorted :class:`Dataset`. + """ + sort_key = SortKey(key, descending, boundaries) + plan = self._plan.copy() + op = Sort( + self._logical_plan.dag, + sort_key=sort_key, + ) + logical_plan = LogicalPlan(op, self.context) + return Dataset(plan, logical_plan) + + @PublicAPI(api_group=SMD_API_GROUP) + def zip(self, other: "Dataset") -> "Dataset": + """Zip the columns of this dataset with the columns of another. + + The datasets must have the same number of rows. Their column sets are + merged, and any duplicate column names are disambiguated with suffixes like + ``"_1"``. + + .. note:: + The smaller of the two datasets is repartitioned to align the number + of rows per block with the larger dataset. + + .. note:: + Zipped datasets aren't lineage-serializable. As a result, they can't be used + as a tunable hyperparameter in Ray Tune. 
+ + Examples: + >>> import ray + >>> ds1 = ray.data.range(5) + >>> ds2 = ray.data.range(5) + >>> ds1.zip(ds2).take_batch() + {'id': array([0, 1, 2, 3, 4]), 'id_1': array([0, 1, 2, 3, 4])} + + Args: + other: The dataset to zip with on the right hand side. + + Returns: + A :class:`Dataset` containing the columns of the second dataset + concatenated horizontally with the columns of the first dataset, + with duplicate column names disambiguated with suffixes like ``"_1"``. + """ + plan = self._plan.copy() + op = Zip(self._logical_plan.dag, other._logical_plan.dag) + logical_plan = LogicalPlan(op, self.context) + return Dataset(plan, logical_plan) + + @PublicAPI(api_group=BT_API_GROUP) + def limit(self, limit: int) -> "Dataset": + """Truncate the dataset to the first ``limit`` rows. + + Unlike :meth:`~Dataset.take`, this method doesn't move data to the caller's + machine. Instead, it returns a new :class:`Dataset` pointing to the truncated + distributed data. + + Examples: + >>> import ray + >>> ds = ray.data.range(1000) + >>> ds.limit(5).count() + 5 + + Time complexity: O(limit specified) + + Args: + limit: The size of the dataset to truncate to. + + Returns: + The truncated dataset. + """ + plan = self._plan.copy() + op = Limit(self._logical_plan.dag, limit=limit) + logical_plan = LogicalPlan(op, self.context) + return Dataset(plan, logical_plan) + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def take_batch( + self, batch_size: int = 20, *, batch_format: Optional[str] = "default" + ) -> DataBatch: + """Return up to ``batch_size`` rows from the :class:`Dataset` in a batch. + + Ray Data represents batches as NumPy arrays or pandas DataFrames. You can + configure the batch type by specifying ``batch_format``. + + This method is useful for inspecting inputs to :meth:`~Dataset.map_batches`. + + .. warning:: + + :meth:`~Dataset.take_batch` moves up to ``batch_size`` rows to the caller's + machine. 
If ``batch_size`` is large, this method can cause an ` + ``OutOfMemory`` error on the caller. + + Examples: + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.take_batch(5) + {'id': array([0, 1, 2, 3, 4])} + + Time complexity: O(batch_size specified) + + Args: + batch_size: The maximum number of rows to return. + batch_format: If ``"default"`` or ``"numpy"``, batches are + ``Dict[str, numpy.ndarray]``. If ``"pandas"``, batches are + ``pandas.DataFrame``. + + Returns: + A batch of up to ``batch_size`` rows from the dataset. + + Raises: + ``ValueError``: if the dataset is empty. + """ + batch_format = _apply_batch_format(batch_format) + limited_ds = self.limit(batch_size) + + try: + res = next( + iter( + limited_ds.iter_batches( + batch_size=batch_size, + prefetch_batches=0, + batch_format=batch_format, + ) + ) + ) + except StopIteration: + raise ValueError("The dataset is empty.") + self._synchronize_progress_bar() + + # Save the computed stats to the original dataset. + self._plan._snapshot_stats = limited_ds._plan.stats() + return res + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def take(self, limit: int = 20) -> List[Dict[str, Any]]: + """Return up to ``limit`` rows from the :class:`Dataset`. + + This method is useful for inspecting data. + + .. warning:: + + :meth:`~Dataset.take` moves up to ``limit`` rows to the caller's machine. If + ``limit`` is large, this method can cause an ``OutOfMemory`` error on the + caller. + + Examples: + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.take(3) + [{'id': 0}, {'id': 1}, {'id': 2}] + + Time complexity: O(limit specified) + + Args: + limit: The maximum number of rows to return. + + Returns: + A list of up to ``limit`` rows from the dataset. + + .. seealso:: + + :meth:`~Dataset.take_all` + Call this method to return all rows. 
+ """ + if ray.util.log_once("dataset_take"): + logger.info( + "Tip: Use `take_batch()` instead of `take() / show()` to return " + "records in pandas or numpy batch format." + ) + output = [] + + limited_ds = self.limit(limit) + for row in limited_ds.iter_rows(): + output.append(row) + if len(output) >= limit: + break + self._synchronize_progress_bar() + + # Save the computed stats to the original dataset. + self._plan._snapshot_stats = limited_ds._plan.stats() + return output + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def take_all(self, limit: Optional[int] = None) -> List[Dict[str, Any]]: + """Return all of the rows in this :class:`Dataset`. + + This method is useful for inspecting small datasets. + + .. warning:: + + :meth:`~Dataset.take_all` moves the entire dataset to the caller's + machine. If the dataset is large, this method can cause an + ``OutOfMemory`` error on the caller. + + Examples: + >>> import ray + >>> ds = ray.data.range(5) + >>> ds.take_all() + [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}] + + Time complexity: O(dataset size) + + Args: + limit: Raise an error if the size exceeds the specified limit. + + Returns: + A list of all the rows in the dataset. + + .. seealso:: + + :meth:`~Dataset.take` + Call this method to return a specific number of rows. + """ + output = [] + for row in self.iter_rows(): + output.append(row) + if limit is not None and len(output) > limit: + raise ValueError( + f"The dataset has more than the given limit of {limit} records." + ) + self._synchronize_progress_bar() + return output + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def show(self, limit: int = 20) -> None: + """Print up to the given number of rows from the :class:`Dataset`. + + This method is useful for inspecting data. 
+ + Examples: + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.show(3) + {'id': 0} + {'id': 1} + {'id': 2} + + Time complexity: O(limit specified) + + Args: + limit: The maximum number of row to print. + + .. seealso:: + + :meth:`~Dataset.take` + Call this method to get (not print) a given number of rows. + """ + for row in self.take(limit): + print(row) + + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="row count", + pattern="Examples:", + ) + @PublicAPI(api_group=IM_API_GROUP) + def count(self) -> int: + """Count the number of rows in the dataset. + + For Datasets which only read Parquet files (created with + :meth:`~ray.data.read_parquet`), this method reads the file metadata to + efficiently count the number of rows without reading in the entire data. + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> ds.count() + 10 + + Returns: + The number of records in the dataset. + """ + # Handle empty dataset. + if self._plan.initial_num_blocks() == 0: + return 0 + + # For parquet, we can return the count directly from metadata. + meta_count = self._meta_count() + if meta_count is not None: + return meta_count + + plan = self._plan.copy() + count_op = Count([self._logical_plan.dag]) + logical_plan = LogicalPlan(count_op, self.context) + count_ds = Dataset(plan, logical_plan) + + count = 0 + for batch in count_ds.iter_batches(batch_size=None): + assert Count.COLUMN_NAME in batch, ( + "Outputs from the 'Count' logical operator should contain a column " + f"named '{Count.COLUMN_NAME}'" + ) + count += batch[Count.COLUMN_NAME].sum() + # Explicitly cast to int to avoid returning `np.int64`, which is the result + # from calculating `sum()` from numpy batches. 
+ return int(count) + + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="schema", + extra_condition="or if ``fetch_if_missing=True`` (the default)", + pattern="Time complexity:", + ) + @PublicAPI(api_group=IM_API_GROUP) + def schema(self, fetch_if_missing: bool = True) -> Optional["Schema"]: + """Return the schema of the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> ds.schema() + Column Type + ------ ---- + id int64 + + Time complexity: O(1) + + Args: + fetch_if_missing: If True, synchronously fetch the schema if it's + not known. If False, None is returned if the schema is not known. + Default is True. + + Returns: + The :class:`ray.data.Schema` class of the records, or None if the + schema is not known and fetch_if_missing is False. + """ + + context = self._plan._context + + # First check if the schema is already known from materialized blocks. + base_schema = self._plan.schema(fetch_if_missing=False) + if base_schema is not None: + return Schema(base_schema, data_context=context) + + # Lazily execute only the first block to minimize computation. We achieve this + # by appending a Limit[1] operation to a copy of this Dataset, which we then + # execute to get its schema. + base_schema = self.limit(1)._plan.schema(fetch_if_missing=fetch_if_missing) + if base_schema is not None: + self._plan.cache_schema(base_schema) + return Schema(base_schema, data_context=context) + else: + return None + + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="schema", + extra_condition="or if ``fetch_if_missing=True`` (the default)", + pattern="Time complexity:", + ) + @PublicAPI(api_group=IM_API_GROUP) + def columns(self, fetch_if_missing: bool = True) -> Optional[List[str]]: + """Returns the columns of this Dataset. + + Time complexity: O(1) + + Example: + >>> import ray + >>> # Create dataset from synthetic data. 
+ >>> ds = ray.data.range(1000) + >>> ds.columns() + ['id'] + + Args: + fetch_if_missing: If True, synchronously fetch the column names from the + schema if it's not known. If False, None is returned if the schema is + not known. Default is True. + + Returns: + A list of the column names for this Dataset or None if schema is not known + and `fetch_if_missing` is False. + + """ + schema = self.schema(fetch_if_missing=fetch_if_missing) + if schema is not None: + return schema.names + return None + + @PublicAPI(api_group=IM_API_GROUP) + def num_blocks(self) -> int: + """Return the number of blocks of this :class:`Dataset`. + + This method is only implemented for :class:`~ray.data.MaterializedDataset`, + since the number of blocks may dynamically change during execution. + For instance, during read and transform operations, Ray Data may dynamically + adjust the number of blocks to respect memory limits, increasing the + number of blocks at runtime. + + Returns: + The number of blocks of this :class:`Dataset`. + """ + raise NotImplementedError( + "Number of blocks is only available for `MaterializedDataset`," + "because the number of blocks may dynamically change during execution." + "Call `ds.materialize()` to get a `MaterializedDataset`." + ) + + @ConsumptionAPI + @PublicAPI(api_group=IM_API_GROUP) + def size_bytes(self) -> int: + """Return the in-memory size of the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> ds.size_bytes() + 80 + + Returns: + The in-memory size of the dataset in bytes, or None if the + in-memory size is not known. + """ + # If the size is known from metadata, return it. 
+ if self._logical_plan.dag.aggregate_output_metadata().size_bytes is not None: + return self._logical_plan.dag.aggregate_output_metadata().size_bytes + + metadata = self._plan.execute().metadata + if not metadata or metadata[0].size_bytes is None: + return None + return sum(m.size_bytes for m in metadata) + + @ConsumptionAPI + @PublicAPI(api_group=IM_API_GROUP) + def input_files(self) -> List[str]: + """Return the list of input files for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv") + >>> ds.input_files() + ['ray-example-data/iris.csv'] + + Returns: + The list of input files used to create the dataset, or an empty + list if the input files is not known. + """ + return list(set(self._plan.input_files())) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def write_parquet( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + arrow_parquet_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + **arrow_parquet_args, + ) -> None: + """Writes the :class:`~ray.data.Dataset` to parquet files under the provided ``path``. + + The number of files is determined by the number of blocks in the dataset. + To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. + + If pyarrow can't represent your data, this method errors. + + By default, the format of the output files is ``{uuid}_{block_idx}.parquet``, + where ``uuid`` is a unique id for the dataset. To modify this behavior, + implement a custom :class:`~ray.data.datasource.FilenameProvider` and pass it in + as the ``filename_provider`` argument. 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.write_parquet("local:///tmp/data/") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where + parquet files are written to. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. + try_create_dir: If ``True``, attempts to create all directories in the + destination path. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + arrow_parquet_args_fn: Callable that returns a dictionary of write + arguments that are provided to `pyarrow.parquet.write_table() `_ + when writing each block to a file. Overrides + any duplicate keys from ``arrow_parquet_args``. Use this argument + instead of ``arrow_parquet_args`` if any of your write arguments + can't pickled, or if you'd like to lazily resolve the write + arguments for each dataset block. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. 
+ ray_remote_args: Kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + arrow_parquet_args: Options to pass to + `pyarrow.parquet.write_table() `_, which is used to write out each + block to a file. + """ # noqa: E501 + if arrow_parquet_args_fn is None: + arrow_parquet_args_fn = lambda: {} # noqa: E731 + + datasink = ParquetDatasink( + path, + arrow_parquet_args_fn=arrow_parquet_args_fn, + arrow_parquet_args=arrow_parquet_args, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def write_json( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + pandas_json_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + **pandas_json_args, + ) -> None: + """Writes the :class:`~ray.data.Dataset` to JSON and JSONL files. + + The number of files is determined by the number of blocks in the dataset. + To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. + + This method is only supported for datasets with records that are convertible to + pandas dataframes. 
+ + By default, the format of the output files is ``{uuid}_{block_idx}.json``, + where ``uuid`` is a unique id for the dataset. To modify this behavior, + implement a custom :class:`~ray.data.datasource.FilenameProvider` and pass it in + as the ``filename_provider`` argument. + + Examples: + Write the dataset as JSON file to a local directory. + + >>> import ray + >>> import pandas as pd + >>> ds = ray.data.from_pandas([pd.DataFrame({"one": [1], "two": ["a"]})]) + >>> ds.write_json("local:///tmp/data") + + Write the dataset as JSONL files to a local directory. + + >>> ds = ray.data.read_json("s3://anonymous@ray-example-data/train.jsonl") + >>> ds.write_json("local:///tmp/data") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where + the JSON files are written to. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. + try_create_dir: If ``True``, attempts to create all directories in the + destination path. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + pandas_json_args_fn: Callable that returns a dictionary of write + arguments that are provided to + `pandas.DataFrame.to_json() `_ + when writing each block to a file. Overrides + any duplicate keys from ``pandas_json_args``. 
Use this parameter + instead of ``pandas_json_args`` if any of your write arguments + can't be pickled, or if you'd like to lazily resolve the write + arguments for each dataset block. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + pandas_json_args: These args are passed to + `pandas.DataFrame.to_json() `_, + which is used under the hood to write out each + :class:`~ray.data.Dataset` block. These + are dict(orient="records", lines=True) by default. 
+ """ + if pandas_json_args_fn is None: + pandas_json_args_fn = lambda: {} # noqa: E731 + + datasink = JSONDatasink( + path, + pandas_json_args_fn=pandas_json_args_fn, + pandas_json_args=pandas_json_args, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @PublicAPI(stability="alpha", api_group=IOC_API_GROUP) + @ConsumptionAPI + def write_images( + self, + path: str, + column: str, + file_format: str = "png", + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Writes the :class:`~ray.data.Dataset` to images. + + Examples: + >>> import ray + >>> ds = ray.data.read_images("s3://anonymous@ray-example-data/image-datasets/simple") + >>> ds.write_images("local:///tmp/images", column="image") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where + the images are written to. + column: The column containing the data you want to write to images. + file_format: The image file format to write with. For available options, + see `Image file formats `_. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. 
+ try_create_dir: If ``True``, attempts to create all directories in the + destination path. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + """ # noqa: E501 + datasink = ImageDatasink( + path, + column, + file_format, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def write_csv( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + arrow_csv_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + **arrow_csv_args, + ) -> None: + """Writes the :class:`~ray.data.Dataset` to CSV files. + + The number of files is determined by the number of blocks in the dataset. + To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. 
+ + This method is only supported for datasets with records that are convertible to + pyarrow tables. + + By default, the format of the output files is ``{uuid}_{block_idx}.csv``, + where ``uuid`` is a unique id for the dataset. To modify this behavior, + implement a custom :class:`~ray.data.datasource.FilenameProvider` + and pass it in as the ``filename_provider`` argument. + + + Examples: + Write the dataset as CSV files to a local directory. + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.write_csv("local:///tmp/data") + + Write the dataset as CSV files to S3. + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.write_csv("s3://bucket/folder/) # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where + the CSV files are written to. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. + try_create_dir: If ``True``, attempts to create all directories in the + destination path if ``True``. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + arrow_csv_args_fn: Callable that returns a dictionary of write + arguments that are provided to `pyarrow.write.write_csv `_ when writing each + block to a file. Overrides any duplicate keys from ``arrow_csv_args``. 
+ Use this argument instead of ``arrow_csv_args`` if any of your write + arguments cannot be pickled, or if you'd like to lazily resolve the + write arguments for each dataset block. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + arrow_csv_args: Options to pass to `pyarrow.write.write_csv `_ + when writing each block to a file. 
+ """ + if arrow_csv_args_fn is None: + arrow_csv_args_fn = lambda: {} # noqa: E731 + + datasink = CSVDatasink( + path, + arrow_csv_args_fn=arrow_csv_args_fn, + arrow_csv_args=arrow_csv_args, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def write_tfrecords( + self, + path: str, + *, + tf_schema: Optional["schema_pb2.Schema"] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Write the :class:`~ray.data.Dataset` to TFRecord files. + + The `TFRecord `_ + files contain + `tf.train.Example `_ + records, with one Example record for each row in the dataset. + + .. warning:: + tf.train.Feature only natively stores ints, floats, and bytes, + so this function only supports datasets with these data types, + and will error if the dataset contains unsupported types. + + The number of files is determined by the number of blocks in the dataset. + To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. + + This method is only supported for datasets with records that are convertible to + pyarrow tables. + + By default, the format of the output files is ``{uuid}_{block_idx}.tfrecords``, + where ``uuid`` is a unique id for the dataset. To modify this behavior, + implement a custom :class:`~ray.data.datasource.FilenameProvider` + and pass it in as the ``filename_provider`` argument. 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.write_tfrecords("local:///tmp/data/") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where tfrecords + files are written to. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. + try_create_dir: If ``True``, attempts to create all directories in the + destination path. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. 
+ + """ + datasink = TFRecordDatasink( + path=path, + tf_schema=tf_schema, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + @PublicAPI(stability="alpha", api_group=IOC_API_GROUP) + def write_webdataset( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + encoder: Optional[Union[bool, str, callable, list]] = True, + concurrency: Optional[int] = None, + ) -> None: + """Writes the dataset to `WebDataset `_ files. + + The `TFRecord `_ + files will contain + `tf.train.Example `_ # noqa: E501 + records, with one Example record for each row in the dataset. + + .. warning:: + tf.train.Feature only natively stores ints, floats, and bytes, + so this function only supports datasets with these data types, + and will error if the dataset contains unsupported types. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use :meth:`Dataset.repartition`. + + Unless a custom filename provider is given, the format of the output + files is ``{uuid}_{block_idx}.tfrecords``, where ``uuid`` is a unique id + for the dataset. + + Examples: + + .. testcode:: + :skipif: True + + import ray + + ds = ray.data.range(100) + ds.write_webdataset("s3://bucket/folder/") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where tfrecords + files are written to. + filesystem: The filesystem implementation to write to. 
+ try_create_dir: If ``True``, attempts to create all + directories in the destination path. Does nothing if all directories + already exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + ``pyarrow.fs.FileSystem.open_output_stream`` + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. + ray_remote_args: Kwargs passed to ``ray.remote`` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. 
+ + """ + datasink = WebDatasetDatasink( + path, + encoder=encoder, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def write_numpy( + self, + path: str, + *, + column: str, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + filename_provider: Optional[FilenameProvider] = None, + num_rows_per_file: Optional[int] = None, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Writes a column of the :class:`~ray.data.Dataset` to .npy files. + + This is only supported for columns in the datasets that can be converted to + NumPy arrays. + + The number of files is determined by the number of blocks in the dataset. + To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. + + + By default, the format of the output files is ``{uuid}_{block_idx}.npy``, + where ``uuid`` is a unique id for the dataset. To modify this behavior, + implement a custom :class:`~ray.data.datasource.FilenameProvider` + and pass it in as the ``filename_provider`` argument. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.write_numpy("local:///tmp/data/", column="id") + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where + the npy files are written to. + column: The name of the column that contains the data to + be written. + filesystem: The pyarrow filesystem implementation to write to. + These filesystems are specified in the + `pyarrow docs `_. + Specify this if you need to provide specific configurations to the + filesystem. 
By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with + ``s3://``, the ``S3FileSystem`` is used. + try_create_dir: If ``True``, attempts to create all directories in + destination path. Does nothing if all directories already + exist. Defaults to ``True``. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_output_stream `_, which is used when + opening the file to write to. + filename_provider: A :class:`~ray.data.datasource.FilenameProvider` + implementation. Use this parameter to customize what your filenames + look like. + num_rows_per_file: [Experimental] The target number of rows to write to each + file. If ``None``, Ray Data writes a system-chosen number of rows to + each file. The specified value is a hint, not a strict limit. Ray Data + might write more or fewer rows to each file. In specific, if the number + of rows per block is larger than the specified value, Ray Data writes + the number of rows per block to each file. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. 
+ """ + + datasink = NumpyDatasink( + path, + column, + num_rows_per_file=num_rows_per_file, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + filename_provider=filename_provider, + dataset_uuid=self._uuid, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + def write_sql( + self, + sql: str, + connection_factory: Callable[[], Connection], + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + ) -> None: + """Write to a database that provides a + `Python DB API2-compliant `_ connector. + + .. note:: + + This method writes data in parallel using the DB API2 ``executemany`` + method. To learn more about this method, see + `PEP 249 `_. + + Examples: + + .. testcode:: + + import sqlite3 + import ray + + connection = sqlite3.connect("example.db") + connection.cursor().execute("CREATE TABLE movie(title, year, score)") + dataset = ray.data.from_items([ + {"title": "Monty Python and the Holy Grail", "year": 1975, "score": 8.2}, + {"title": "And Now for Something Completely Different", "year": 1971, "score": 7.5} + ]) + + dataset.write_sql( + "INSERT INTO movie VALUES(?, ?, ?)", lambda: sqlite3.connect("example.db") + ) + + result = connection.cursor().execute("SELECT * FROM movie ORDER BY year") + print(result.fetchall()) + + .. testoutput:: + + [('And Now for Something Completely Different', 1971, 7.5), ('Monty Python and the Holy Grail', 1975, 8.2)] + + .. testcode:: + :hide: + + import os + os.remove("example.db") + + Arguments: + sql: An ``INSERT INTO`` statement that specifies the table to write to. The + number of parameters must match the number of columns in the table. + connection_factory: A function that takes no arguments and returns a + Python DB API2 + `Connection object `_. + ray_remote_args: Keyword arguments passed to :meth:`~ray.remote` in the + write tasks. 
+ concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + """ # noqa: E501 + datasink = SQLDatasink(sql=sql, connection_factory=connection_factory) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @PublicAPI(stability="alpha", api_group=IOC_API_GROUP) + @ConsumptionAPI + def write_mongo( + self, + uri: str, + database: str, + collection: str, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Writes the :class:`~ray.data.Dataset` to a MongoDB database. + + This method is only supported for datasets convertible to pyarrow tables. + + The number of parallel writes is determined by the number of blocks in the + dataset. To control the number of number of blocks, call + :meth:`~ray.data.Dataset.repartition`. + + .. warning:: + This method supports only a subset of the PyArrow's types, due to the + limitation of pymongoarrow which is used underneath. Writing unsupported + types fails on type checking. See all the supported types at: + https://mongo-arrow.readthedocs.io/en/latest/data_types.html. + + .. note:: + The records are inserted into MongoDB as new documents. If a record has + the _id field, this _id must be non-existent in MongoDB, otherwise the write + is rejected and fail (hence preexisting documents are protected from + being mutated). It's fine to not have _id field in record and MongoDB will + auto generate one at insertion. + + Examples: + + .. testcode:: + :skipif: True + + import ray + + ds = ray.data.range(100) + ds.write_mongo( + uri="mongodb://username:password@mongodb0.example.com:27017/?authSource=admin", + database="my_db", + collection="my_collection" + ) + + Args: + uri: The URI to the destination MongoDB where the dataset is + written to. 
For the URI format, see details in the + `MongoDB docs `_. + database: The name of the database. This database must exist otherwise + a ValueError is raised. + collection: The name of the collection in the database. This collection + must exist otherwise a ValueError is raised. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + + Raises: + ValueError: if ``database`` doesn't exist. + ValueError: if ``collection`` doesn't exist. + """ + datasink = MongoDatasink( + uri=uri, + database=database, + collection=collection, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI + def write_bigquery( + self, + project_id: str, + dataset: str, + max_retry_cnt: int = 10, + overwrite_table: Optional[bool] = True, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Write the dataset to a BigQuery dataset table. + + To control the number of parallel write tasks, use ``.repartition()`` + before calling this method. + + Examples: + .. testcode:: + :skipif: True + + import ray + import pandas as pd + + docs = [{"title": "BigQuery Datasource test"} for key in range(4)] + ds = ray.data.from_pandas(pd.DataFrame(docs)) + ds.write_bigquery( + project_id="my_project_id", + dataset="my_dataset_table", + overwrite_table=True + ) + + Args: + project_id: The name of the associated Google Cloud Project that hosts + the dataset to read. For more information, see details in + `Creating and managing projects `_. + dataset: The name of the dataset in the format of ``dataset_id.table_id``. + The dataset is created if it doesn't already exist. 
+ max_retry_cnt: The maximum number of retries that an individual block write + is retried due to BigQuery rate limiting errors. This isn't + related to Ray fault tolerance retries. The default number of retries + is 10. + overwrite_table: Whether the write will overwrite the table if it already + exists. The default behavior is to overwrite the table. + ``overwrite_table=False`` will append to the table if it exists. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + """ # noqa: E501 + if ray_remote_args is None: + ray_remote_args = {} + + # Each write task will launch individual remote tasks to write each block + # To avoid duplicate block writes, the write task should not be retried + if ray_remote_args.get("max_retries", 0) != 0: + warnings.warn( + "The max_retries of a BigQuery Write Task should be set to 0" + " to avoid duplicate writes." + ) + else: + ray_remote_args["max_retries"] = 0 + + datasink = BigQueryDatasink( + project_id=project_id, + dataset=dataset, + max_retry_cnt=max_retry_cnt, + overwrite_table=overwrite_table, + ) + self.write_datasink( + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + + @ConsumptionAPI(pattern="Time complexity:") + def write_datasink( + self, + datasink: Datasink, + *, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + ) -> None: + """Writes the dataset to a custom :class:`~ray.data.Datasink`. + + Time complexity: O(dataset size / parallelism) + + Args: + datasink: The :class:`~ray.data.Datasink` to write to. + ray_remote_args: Kwargs passed to ``ray.remote`` in the write tasks. + concurrency: The maximum number of Ray tasks to run concurrently. 
Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run. By default, concurrency is dynamically + decided based on the available resources. + """ # noqa: E501 + if ray_remote_args is None: + ray_remote_args = {} + + if not datasink.supports_distributed_writes: + if ray.util.client.ray.is_connected(): + raise ValueError( + "If you're using Ray Client, Ray Data won't schedule write tasks " + "on the driver's node." + ) + ray_remote_args["scheduling_strategy"] = NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ) + + plan = self._plan.copy() + write_op = Write( + self._logical_plan.dag, + datasink, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + ) + logical_plan = LogicalPlan(write_op, self.context) + + try: + import pandas as pd + + datasink.on_write_start() + + self._write_ds = Dataset(plan, logical_plan).materialize() + # TODO: Get and handle the blocks with an iterator instead of getting + # everything in a blocking way, so some blocks can be freed earlier. + raw_write_results = ray.get(self._write_ds._plan.execute().block_refs) + assert all( + isinstance(block, pd.DataFrame) and len(block) == 1 + for block in raw_write_results + ) + datasink.on_write_complete(raw_write_results) + + except Exception as e: + datasink.on_write_failed(e) + raise + + @ConsumptionAPI( + delegate=( + "Calling any of the consumption methods on the returned ``DataIterator``" + ), + pattern="Returns:", + ) + @PublicAPI(api_group=CD_API_GROUP) + def iterator(self) -> DataIterator: + """Return a :class:`~ray.data.DataIterator` over this dataset. + + Don't call this method directly. Use it internally. + + Returns: + A :class:`~ray.data.DataIterator` over this dataset. + """ + return DataIteratorImpl(self) + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def iter_rows(self) -> Iterable[Dict[str, Any]]: + """Return an iterable over the rows in this dataset. 
+ + Examples: + >>> import ray + >>> for row in ray.data.range(3).iter_rows(): + ... print(row) + {'id': 0} + {'id': 1} + {'id': 2} + + Time complexity: O(1) + + Returns: + An iterable over the rows in this dataset. + """ + return self.iterator().iter_rows() + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def iter_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + batch_format: Optional[str] = "default", + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + _collate_fn: Optional[Callable[[DataBatch], CollatedData]] = None, + ) -> Iterable[DataBatch]: + """Return an iterable over batches of data. + + This method is useful for model training. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.read_images("example://image-datasets/simple") + + for batch in ds.iter_batches(batch_size=2, batch_format="numpy"): + print(batch) + + .. testoutput:: + :options: +MOCK + + {'image': array([[[[...]]]], dtype=uint8)} + ... + {'image': array([[[[...]]]], dtype=uint8)} + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool is used + to fetch the objects to the local node and format the batches. Defaults + to 1. + batch_size: The number of rows in each batch, or ``None`` to use entire + blocks as batches (blocks may contain different numbers of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + batch_format: If ``"default"`` or ``"numpy"``, batches are + ``Dict[str, numpy.ndarray]``. If ``"pandas"``, batches are + ``pandas.DataFrame``. + drop_last: Whether to drop the last batch if it's incomplete. 
+ local_shuffle_buffer_size: If not ``None``, the data is randomly shuffled + using a local in-memory shuffle buffer, and this value serves as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer are drained. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over batches of data. + """ + batch_format = _apply_batch_format(batch_format) + return self.iterator().iter_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + batch_format=batch_format, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + _collate_fn=_collate_fn, + ) + + @ConsumptionAPI + @PublicAPI(api_group=CD_API_GROUP) + def iter_torch_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None, + device: str = "auto", + collate_fn: Optional[Callable[[Dict[str, np.ndarray]], CollatedData]] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + ) -> Iterable[TorchBatchType]: + """Return an iterable over batches of data represented as Torch tensors. + + This iterable yields batches of type ``Dict[str, torch.Tensor]``. + For more flexibility, call :meth:`~Dataset.iter_batches` and manually convert + your data to Torch tensors. + + Examples: + >>> import ray + >>> for batch in ray.data.range( + ... 12, + ... ).iter_torch_batches(batch_size=4): + ... print(batch) + {'id': tensor([0, 1, 2, 3])} + {'id': tensor([4, 5, 6, 7])} + {'id': tensor([ 8, 9, 10, 11])} + + Use the ``collate_fn`` to customize how the tensor batch is created. 
+ + >>> from typing import Any, Dict + >>> import torch + >>> import numpy as np + >>> import ray + >>> def collate_fn(batch: Dict[str, np.ndarray]) -> Any: + ... return torch.stack( + ... [torch.as_tensor(array) for array in batch.values()], + ... axis=1 + ... ) + >>> dataset = ray.data.from_items([ + ... {"col_1": 1, "col_2": 2}, + ... {"col_1": 3, "col_2": 4}]) + >>> for batch in dataset.iter_torch_batches(collate_fn=collate_fn): + ... print(batch) + tensor([[1, 2], + [3, 4]]) + + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool is used + to fetch the objects to the local node, format the batches, and apply + the ``collate_fn``. Defaults to 1. + batch_size: The number of rows in each batch, or ``None`` to use entire + blocks as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The Torch dtype(s) for the created tensor(s); if ``None``, the dtype + is inferred from the tensor data. You can't use this parameter with + ``collate_fn``. + device: The device on which the tensor should be placed. Defaults to + "auto" which moves the tensors to the appropriate device when the + Dataset is passed to Ray Train and ``collate_fn`` is not provided. + Otherwise, defaults to CPU. You can't use this parameter with + ``collate_fn``. + collate_fn: A function to convert a Numpy batch to a PyTorch tensor batch. + When this parameter is specified, the user should manually handle the + host to device data transfer outside of collate_fn. + This is useful for further processing the data after it has been + batched. Potential use cases include collating along a dimension other + than the first, padding sequences of various lengths, or generally + handling batches of different length tensors. 
If not provided, the + default collate function is used which simply converts the batch of + numpy arrays to a batch of PyTorch tensors. This API is still + experimental and is subject to change. You can't use this parameter in + conjunction with ``dtypes`` or ``device``. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If not ``None``, the data is randomly shuffled + using a local in-memory shuffle buffer, and this value serves as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer are drained. + ``batch_size`` must also be specified when using local shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over Torch Tensor batches. + + .. seealso:: + :meth:`Dataset.iter_batches` + Call this method to manually convert your data to Torch tensors. + """ # noqa: E501 + return self.iterator().iter_torch_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + dtypes=dtypes, + device=device, + collate_fn=collate_fn, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + + @ConsumptionAPI + @Deprecated + def iter_tf_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["tf.dtypes.DType", Dict[str, "tf.dtypes.DType"]]] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + ) -> Iterable[TensorFlowTensorBatchType]: + """Return an iterable over batches of data represented as TensorFlow tensors. + + This iterable yields batches of type ``Dict[str, tf.Tensor]``. + For more flexibility, call :meth:`~Dataset.iter_batches` and manually convert + your data to TensorFlow tensors. + + .. 
tip:: + If you don't need the additional flexibility provided by this method, + consider using :meth:`~ray.data.Dataset.to_tf` instead. It's easier + to use. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") + + tf_dataset = ds.to_tf( + feature_columns="sepal length (cm)", + label_columns="target", + batch_size=2 + ) + for features, labels in tf_dataset: + print(features, labels) + + .. testoutput:: + + tf.Tensor([5.1 4.9], shape=(2,), dtype=float64) tf.Tensor([0 0], shape=(2,), dtype=int64) + ... + tf.Tensor([6.2 5.9], shape=(2,), dtype=float64) tf.Tensor([2 2], shape=(2,), dtype=int64) + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool is used + to fetch the objects to the local node, format the batches, and apply + the ``collate_fn``. Defaults to 1. + batch_size: The number of rows in each batch, or ``None`` to use entire + blocks as batches (blocks may contain different numbers of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The TensorFlow dtype(s) for the created tensor(s); if ``None``, the + dtype is inferred from the tensor data. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If not ``None``, the data is randomly shuffled + using a local in-memory shuffle buffer, and this value serves as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer are drained. + ``batch_size`` must also be specified when using local shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterable over TensorFlow Tensor batches. + + .. 
seealso:: + :meth:`Dataset.iter_batches` + Call this method to manually convert your data to TensorFlow tensors. + """ # noqa: E501 + warnings.warn( + "`iter_tf_batches` is deprecated and will be removed after May 2025. Use " + "`to_tf` instead.", + DeprecationWarning, + ) + return self.iterator().iter_tf_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + dtypes=dtypes, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + + @ConsumptionAPI(pattern="Time complexity:") + @Deprecated + def to_torch( + self, + *, + label_column: Optional[str] = None, + feature_columns: Optional[ + Union[List[str], List[List[str]], Dict[str, List[str]]] + ] = None, + label_column_dtype: Optional["torch.dtype"] = None, + feature_column_dtypes: Optional[ + Union["torch.dtype", List["torch.dtype"], Dict[str, "torch.dtype"]] + ] = None, + batch_size: int = 1, + prefetch_batches: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + unsqueeze_label_tensor: bool = True, + unsqueeze_feature_tensors: bool = True, + ) -> "torch.utils.data.IterableDataset": + """Return a + `Torch IterableDataset `_ + over this :class:`~ray.data.Dataset`. + + This is only supported for datasets convertible to Arrow records. + + It is recommended to use the returned ``IterableDataset`` directly + instead of passing it into a torch ``DataLoader``. + + Each element in ``IterableDataset`` is a tuple consisting of 2 + elements. The first item contains the feature tensor(s), and the + second item is the label tensor. Those can take on different + forms, depending on the specified arguments. 
+ + For the features tensor (N is the ``batch_size`` and n, m, k + are the number of features per tensor): + + * If ``feature_columns`` is a ``List[str]``, the features is + a tensor of shape (N, n), with columns corresponding to + ``feature_columns`` + + * If ``feature_columns`` is a ``List[List[str]]``, the features is + a list of tensors of shape [(N, m),...,(N, k)], with columns of each + tensor corresponding to the elements of ``feature_columns`` + + * If ``feature_columns`` is a ``Dict[str, List[str]]``, the features + is a dict of key-tensor pairs of shape + {key1: (N, m),..., keyN: (N, k)}, with columns of each + tensor corresponding to the value of ``feature_columns`` under the + key. + + If ``unsqueeze_label_tensor=True`` (default), the label tensor is + of shape (N, 1). Otherwise, it is of shape (N,). + If ``label_column`` is specified as ``None``, then no column from the + ``Dataset`` is treated as the label, and the output label tensor + is ``None``. + + Note that you probably want to call :meth:`Dataset.split` on this dataset if + there are to be multiple Torch workers consuming the data. + + Time complexity: O(1) + + Args: + label_column: The name of the column used as the + label (second element of the output list). Can be None for + prediction, in which case the second element of returned + tuple will also be None. + feature_columns: The names of the columns + to use as the features. Can be a list of lists or + a dict of string-list pairs for multi-tensor output. + If ``None``, then use all columns except the label column as + the features. + label_column_dtype: The torch dtype to + use for the label column. If ``None``, then automatically infer + the dtype. + feature_column_dtypes: The dtypes to use for the feature + tensors. This should match the format of ``feature_columns``, + or be a single dtype, in which case it is applied to + all tensors. If ``None``, then automatically infer the dtype. 
+ batch_size: How many samples per batch to yield at a time. + Defaults to 1. + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool is used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + drop_last: Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of the stream is not divisible by the batch + size, then the last batch is smaller. Defaults to False. + local_shuffle_buffer_size: If non-None, the data is randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer is drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + unsqueeze_label_tensor: If set to True, the label tensor + is unsqueezed (reshaped to (N, 1)). Otherwise, it will + be left as is, that is (N, ). In general, regression loss + functions expect an unsqueezed tensor, while classification + loss functions expect a squeezed one. Defaults to True. + unsqueeze_feature_tensors: If set to True, the features tensors + are unsqueezed (reshaped to (N, 1)) before being concatenated into + the final features tensor. Otherwise, they are left as is, that is + (N, ). Defaults to True. + + Returns: + A `Torch IterableDataset`_. + """ # noqa: E501 + warnings.warn( + "`to_torch` is deprecated and will be removed after May 2025. 
Use " + "`iter_torch_batches` instead.", + DeprecationWarning, + ) + return self.iterator().to_torch( + label_column=label_column, + feature_columns=feature_columns, + label_column_dtype=label_column_dtype, + feature_column_dtypes=feature_column_dtypes, + batch_size=batch_size, + prefetch_batches=prefetch_batches, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + unsqueeze_label_tensor=unsqueeze_label_tensor, + unsqueeze_feature_tensors=unsqueeze_feature_tensors, + ) + + @ConsumptionAPI + @PublicAPI(api_group=IOC_API_GROUP) + def to_tf( + self, + feature_columns: Union[str, List[str]], + label_columns: Union[str, List[str]], + *, + additional_columns: Union[str, List[str]] = None, + prefetch_batches: int = 1, + batch_size: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + feature_type_spec: Union["tf.TypeSpec", Dict[str, "tf.TypeSpec"]] = None, + label_type_spec: Union["tf.TypeSpec", Dict[str, "tf.TypeSpec"]] = None, + additional_type_spec: Union["tf.TypeSpec", Dict[str, "tf.TypeSpec"]] = None, + ) -> "tf.data.Dataset": + """Return a `TensorFlow Dataset `_ + over this :class:`~ray.data.Dataset`. + + .. warning:: + If your :class:`~ray.data.Dataset` contains ragged tensors, this method errors. + To prevent errors, :ref:`resize your tensors `. + + Examples: + >>> import ray + >>> ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") + >>> ds + Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + ) + + If your model accepts a single tensor as input, specify a single feature column. 
+ + >>> ds.to_tf(feature_columns="sepal length (cm)", label_columns="target") + <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your model accepts a dictionary as input, specify a list of feature columns. + + >>> ds.to_tf(["sepal length (cm)", "sepal width (cm)"], "target") + <_OptionsDataset element_spec=({'sepal length (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), 'sepal width (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal width (cm)')}, TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your dataset contains multiple features but your model accepts a single + tensor as input, combine features with + :class:`~ray.data.preprocessors.Concatenator`. + + >>> from ray.data.preprocessors import Concatenator + >>> columns_to_concat = ["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"] + >>> preprocessor = Concatenator(columns=columns_to_concat, output_column_name="features") + >>> ds = preprocessor.transform(ds) + >>> ds + Concatenator + +- Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + ) + >>> ds.to_tf("features", "target") + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your model accepts different types, shapes, or names of tensors as input, specify the type spec. + If type specs are not specified, they are automatically inferred from the schema of the dataset. + + >>> import tensorflow as tf + >>> ds.to_tf( + ... feature_columns="features", + ... label_columns="target", + ... feature_type_spec=tf.TensorSpec(shape=(None, 4), dtype=tf.float32, name="features"), + ... 
label_type_spec=tf.TensorSpec(shape=(None,), dtype=tf.float32, name="label") + ... ) + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float32, name='features'), TensorSpec(shape=(None,), dtype=tf.float32, name='label'))> + + If your model accepts additional metadata aside from features and label, specify a single additional column or a list of additional columns. + A common use case is to include sample weights in the data samples and train a ``tf.keras.Model`` with ``tf.keras.Model.fit``. + + >>> import pandas as pd + >>> ds = ds.add_column("sample weights", lambda df: pd.Series([1] * len(df))) + >>> ds.to_tf(feature_columns="features", label_columns="target", additional_columns="sample weights") + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.int64, name='sample weights'))> + + If your model accepts different types, shapes, or names for the additional metadata, specify the type spec of the additional column. + + >>> ds.to_tf( + ... feature_columns="features", + ... label_columns="target", + ... additional_columns="sample weights", + ... additional_type_spec=tf.TensorSpec(shape=(None,), dtype=tf.float32, name="weight") + ... ) + <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'), TensorSpec(shape=(None,), dtype=tf.float32, name='weight'))> + + Args: + feature_columns: Columns that correspond to model inputs. If this is a + string, the input data is a tensor. If this is a list, the input data + is a ``dict`` that maps column names to their tensor representation. + label_columns: Columns that correspond to model targets. If this is a + string, the target data is a tensor. If this is a list, the target data + is a ``dict`` that maps column names to their tensor representation. 
+ additional_columns: Columns that correspond to sample weights or other metadata. + If this is a string, the weight data is a tensor. If this is a list, the + weight data is a ``dict`` that maps column names to their tensor representation. + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool is used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. + batch_size: Record batch size. Defaults to 1. + drop_last: Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of the stream is not divisible by the batch + size, then the last batch is smaller. Defaults to False. + local_shuffle_buffer_size: If non-None, the data is randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer is drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + feature_type_spec: The `tf.TypeSpec` of `feature_columns`. If there is + only one column, specify a `tf.TypeSpec`. If there are multiple columns, + specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + label_type_spec: The `tf.TypeSpec` of `label_columns`. If there is + only one column, specify a `tf.TypeSpec`. If there are multiple columns, + specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + additional_type_spec: The `tf.TypeSpec` of `additional_columns`. 
If there + is only one column, specify a `tf.TypeSpec`. If there are multiple + columns, specify a ``dict`` that maps column names to their `tf.TypeSpec`. + Default is `None` to automatically infer the type of each column. + + Returns: + A `TensorFlow Dataset`_ that yields inputs and targets. + + .. seealso:: + + :meth:`~ray.data.Dataset.iter_tf_batches` + Call this method if you need more flexibility. + """ # noqa: E501 + + return self.iterator().to_tf( + feature_columns=feature_columns, + label_columns=label_columns, + additional_columns=additional_columns, + prefetch_batches=prefetch_batches, + drop_last=drop_last, + batch_size=batch_size, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + feature_type_spec=feature_type_spec, + label_type_spec=label_type_spec, + additional_type_spec=additional_type_spec, + ) + + @ConsumptionAPI(pattern="Time complexity:") + @PublicAPI(api_group=IOC_API_GROUP) + def to_dask( + self, + meta: Union[ + "pandas.DataFrame", + "pandas.Series", + Dict[str, Any], + Iterable[Any], + Tuple[Any], + None, + ] = None, + verify_meta: bool = True, + ) -> "dask.dataframe.DataFrame": + """Convert this :class:`~ray.data.Dataset` into a + `Dask DataFrame `_. + + This is only supported for datasets convertible to Arrow records. + + Note that this function will set the Dask scheduler to Dask-on-Ray + globally, via the config. + + Time complexity: O(dataset size / parallelism) + + Args: + meta: An empty `pandas DataFrame`_ or `Series`_ that matches the dtypes and column + names of the stream. This metadata is necessary for many algorithms in + dask dataframe to work. For ease of use, some alternative inputs are + also available. Instead of a DataFrame, a dict of ``{name: dtype}`` or + iterable of ``(name, dtype)`` can be provided (note that the order of + the names should match the order of the columns). Instead of a series, a + tuple of ``(name, dtype)`` can be used. 
+ By default, this is inferred from the underlying Dataset schema, + with this argument supplying an optional override. + verify_meta: If True, Dask will check that the partitions have consistent + metadata. Defaults to True. + + Returns: + A `Dask DataFrame`_ created from this dataset. + + .. _pandas DataFrame: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html + .. _Series: https://pandas.pydata.org/docs/reference/api/pandas.Series.html + """ # noqa: E501 + import dask + import dask.dataframe as dd + import pandas as pd + + try: + import pyarrow as pa + except Exception: + pa = None + + from ray.data._internal.pandas_block import PandasBlockSchema + from ray.util.client.common import ClientObjectRef + from ray.util.dask import ray_dask_get + + dask.config.set(scheduler=ray_dask_get) + + @dask.delayed + def block_to_df(block_ref: ObjectRef[Block]) -> pd.DataFrame: + if isinstance(block_ref, (ray.ObjectRef, ClientObjectRef)): + raise ValueError( + "Dataset.to_dask() must be used with Dask-on-Ray, please " + "set the Dask scheduler to ray_dask_get (located in " + "ray.util.dask)." + ) + return _block_to_df(block_ref) + + if meta is None: + from ray.data.extensions import TensorDtype + + # Infer Dask metadata from Dataset schema. 
+ schema = self.schema(fetch_if_missing=True) + if isinstance(schema, PandasBlockSchema): + meta = pd.DataFrame( + { + col: pd.Series( + dtype=( + dtype + if not isinstance(dtype, TensorDtype) + else np.object_ + ) + ) + for col, dtype in zip(schema.names, schema.types) + } + ) + elif pa is not None and isinstance(schema, pa.Schema): + arrow_tensor_ext_types = get_arrow_extension_fixed_shape_tensor_types() + + if any( + isinstance(type_, arrow_tensor_ext_types) for type_ in schema.types + ): + meta = pd.DataFrame( + { + col: pd.Series( + dtype=( + dtype.to_pandas_dtype() + if not isinstance(dtype, arrow_tensor_ext_types) + else np.object_ + ) + ) + for col, dtype in zip(schema.names, schema.types) + } + ) + else: + meta = schema.empty_table().to_pandas() + + dfs = [] + for ref_bundle in self.iter_internal_ref_bundles(): + for block_ref in ref_bundle.block_refs: + dfs.append(block_to_df(block_ref)) + + ddf = dd.from_delayed( + dfs, + meta=meta, + verify_meta=verify_meta, + ) + return ddf + + @ConsumptionAPI(pattern="Time complexity:") + @PublicAPI(api_group=IOC_API_GROUP) + def to_mars(self) -> "mars.dataframe.DataFrame": + """Convert this :class:`~ray.data.Dataset` into a + `Mars DataFrame `_. + + Time complexity: O(dataset size / parallelism) + + Returns: + A `Mars DataFrame`_ created from this dataset. 
+ """ # noqa: E501 + import pandas as pd + import pyarrow as pa + from mars.dataframe.datasource.read_raydataset import DataFrameReadRayDataset + from mars.dataframe.utils import parse_index + + from ray.data._internal.pandas_block import PandasBlockSchema + + refs = self.to_pandas_refs() + # remove this when https://github.com/mars-project/mars/issues/2945 got fixed + schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema + if isinstance(schema, PandasBlockSchema): + dtypes = pd.Series(schema.types, index=schema.names) + elif isinstance(schema, pa.Schema): + dtypes = schema.empty_table().to_pandas().dtypes + else: + raise NotImplementedError(f"Unsupported format of schema {schema}") + index_value = parse_index(pd.RangeIndex(-1)) + columns_value = parse_index(dtypes.index, store_data=True) + op = DataFrameReadRayDataset(refs=refs) + return op(index_value=index_value, columns_value=columns_value, dtypes=dtypes) + + @ConsumptionAPI(pattern="Time complexity:") + @PublicAPI(api_group=IOC_API_GROUP) + def to_modin(self) -> "modin.pandas.dataframe.DataFrame": + """Convert this :class:`~ray.data.Dataset` into a + `Modin DataFrame `_. + + This works by first converting this dataset into a distributed set of + Pandas DataFrames (using :meth:`Dataset.to_pandas_refs`). + See caveats there. Then the individual DataFrames are used to + create the Modin DataFrame using + ``modin.distributed.dataframe.pandas.partitions.from_partitions()``. + + This is only supported for datasets convertible to Arrow records. + This function induces a copy of the data. For zero-copy access to the + underlying data, consider using :meth:`.to_arrow_refs` or + :meth:`.iter_internal_ref_bundles`. + + Time complexity: O(dataset size / parallelism) + + Returns: + A `Modin DataFrame`_ created from this dataset. 
+ """ # noqa: E501 + + from modin.distributed.dataframe.pandas.partitions import from_partitions + + pd_objs = self.to_pandas_refs() + return from_partitions(pd_objs, axis=0) + + @ConsumptionAPI(pattern="Time complexity:") + @PublicAPI(api_group=IOC_API_GROUP) + def to_spark(self, spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame": + """Convert this :class:`~ray.data.Dataset` into a + `Spark DataFrame `_. + + Time complexity: O(dataset size / parallelism) + + Args: + spark: A `SparkSession`_, which must be created by RayDP (Spark-on-Ray). + + Returns: + A `Spark DataFrame`_ created from this dataset. + + .. _SparkSession: https://spark.apache.org/docs/3.1.1/api/python/reference/api/pyspark.sql.SparkSession.html + """ # noqa: E501 + import raydp + + schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema + + ref_bundles = self.iter_internal_ref_bundles() + block_refs = _ref_bundles_iterator_to_block_refs_list(ref_bundles) + return raydp.spark.ray_dataset_to_spark_dataframe(spark, schema, block_refs) + + @ConsumptionAPI(pattern="Time complexity:") + @PublicAPI(api_group=IOC_API_GROUP) + def to_pandas(self, limit: int = None) -> "pandas.DataFrame": + """Convert this :class:`~ray.data.Dataset` to a single pandas DataFrame. + + This method errors if the number of rows exceeds the provided ``limit``. + To truncate the dataset beforehand, call :meth:`.limit`. + + Examples: + >>> import ray + >>> ds = ray.data.from_items([{"a": i} for i in range(3)]) + >>> ds.to_pandas() + a + 0 0 + 1 1 + 2 2 + + Time complexity: O(dataset size) + + Args: + limit: The maximum number of rows to return. An error is + raised if the dataset has more rows than this limit. Defaults to + ``None``, which means no limit. + + Returns: + A pandas DataFrame created from this dataset, containing a limited + number of rows. + + Raises: + ValueError: if the number of rows in the :class:`~ray.data.Dataset` exceeds + ``limit``. 
+ """ + if limit is not None: + count = self.count() + if count > limit: + raise ValueError( + f"the dataset has more than the given limit of {limit} " + f"rows: {count}. If you are sure that a DataFrame with " + f"{count} rows will fit in local memory, set " + "ds.to_pandas(limit=None) to disable limits." + ) + + builder = PandasBlockBuilder() + for batch in self.iter_batches(batch_format="pandas", batch_size=None): + builder.add_block(batch) + block = builder.build() + + # `PandasBlockBuilder` creates a dataframe with internal extension types like + # 'TensorDtype'. We use the `to_pandas` method to convert these extension + # types to regular types. + return BlockAccessor.for_block(block).to_pandas() + + @ConsumptionAPI(pattern="Time complexity:") + @DeveloperAPI + def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: + """Converts this :class:`~ray.data.Dataset` into a distributed set of Pandas + dataframes. + + One DataFrame is created for each block in this Dataset. + + This function induces a copy of the data. For zero-copy access to the + underlying data, consider using :meth:`Dataset.to_arrow_refs` or + :meth:`Dataset.iter_internal_ref_bundles`. + + Examples: + >>> import ray + >>> ds = ray.data.range(10, override_num_blocks=2) + >>> refs = ds.to_pandas_refs() + >>> len(refs) + 2 + + Time complexity: O(dataset size / parallelism) + + Returns: + A list of remote pandas DataFrames created from this dataset. + """ + + block_to_df = cached_remote_fn(_block_to_df) + pandas_refs = [] + for bundle in self.iter_internal_ref_bundles(): + for block_ref in bundle.block_refs: + pandas_refs.append(block_to_df.remote(block_ref)) + return pandas_refs + + @DeveloperAPI + def to_numpy_refs( + self, *, column: Optional[str] = None + ) -> List[ObjectRef[np.ndarray]]: + """Converts this :class:`~ray.data.Dataset` into a distributed set of NumPy + ndarrays or dictionary of NumPy ndarrays. + + This is only supported for datasets convertible to NumPy ndarrays. 
+ This function induces a copy of the data. For zero-copy access to the + underlying data, consider using :meth:`Dataset.to_arrow_refs` or + :meth:`Dataset.iter_internal_ref_bundles`. + + Examples: + >>> import ray + >>> ds = ray.data.range(10, override_num_blocks=2) + >>> refs = ds.to_numpy_refs() + >>> len(refs) + 2 + + Time complexity: O(dataset size / parallelism) + + Args: + column: The name of the column to convert to numpy. If ``None``, all columns + are used. If multiple columns are specified, each returned + future represents a dict of ndarrays. Defaults to None. + + Returns: + A list of remote NumPy ndarrays created from this dataset. + """ + block_to_ndarray = cached_remote_fn(_block_to_ndarray) + numpy_refs = [] + for bundle in self.iter_internal_ref_bundles(): + for block_ref in bundle.block_refs: + numpy_refs.append(block_to_ndarray.remote(block_ref, column=column)) + return numpy_refs + + @ConsumptionAPI(pattern="Time complexity:") + @DeveloperAPI + def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]: + """Convert this :class:`~ray.data.Dataset` into a distributed set of PyArrow + tables. + + One PyArrow table is created for each block in this Dataset. + + This method is only supported for datasets convertible to PyArrow tables. + This function is zero-copy if the existing data is already in PyArrow + format. Otherwise, the data is converted to PyArrow format. + + Examples: + >>> import ray + >>> ds = ray.data.range(10, override_num_blocks=2) + >>> refs = ds.to_arrow_refs() + >>> len(refs) + 2 + + Time complexity: O(1) unless conversion is required. + + Returns: + A list of remote PyArrow tables created from this dataset. + """ + import pyarrow as pa + + ref_bundles: Iterator[RefBundle] = self.iter_internal_ref_bundles() + block_refs: List[ + ObjectRef["pyarrow.Table"] + ] = _ref_bundles_iterator_to_block_refs_list(ref_bundles) + # Schema is safe to call since we have already triggered execution with + # iter_internal_ref_bundles. 
+ schema = self.schema(fetch_if_missing=True) + if isinstance(schema, Schema): + schema = schema.base_schema + if isinstance(schema, pa.Schema): + # Zero-copy path. + return block_refs + + block_to_arrow = cached_remote_fn(_block_to_arrow) + return [block_to_arrow.remote(block) for block in block_refs] + + @ConsumptionAPI(pattern="Args:") + def to_random_access_dataset( + self, + key: str, + num_workers: Optional[int] = None, + ) -> RandomAccessDataset: + """Convert this dataset into a distributed RandomAccessDataset (EXPERIMENTAL). + + RandomAccessDataset partitions the dataset across the cluster by the given + sort key, providing efficient random access to records via binary search. A + number of worker actors are created, each of which has zero-copy access to the + underlying sorted data blocks of the dataset. + + Note that the key must be unique in the dataset. If there are duplicate keys, + an arbitrary value is returned. + + This is only supported for Arrow-format datasets. + + Args: + key: The key column over which records can be queried. + num_workers: The number of actors to use to serve random access queries. + By default, this is determined by multiplying the number of Ray nodes + in the cluster by four. As a rule of thumb, you can expect each worker + to provide ~3000 records / second via ``get_async()``, and + ~10000 records / second via ``multiget()``. + """ + if num_workers is None: + num_workers = 4 * len(ray.nodes()) + return RandomAccessDataset(self, key, num_workers=num_workers) + + @ConsumptionAPI(pattern="store memory.", insert_after=True) + @PublicAPI(api_group=E_API_GROUP) + def materialize(self) -> "MaterializedDataset": + """Execute and materialize this dataset into object store memory. + + This can be used to read all blocks into memory. By default, Dataset + doesn't read blocks from the datasource until the first transform. + + Note that this does not mutate the original Dataset. 
Only the blocks of the + returned MaterializedDataset class are pinned in memory. + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> materialized_ds = ds.materialize() + >>> materialized_ds + MaterializedDataset(num_blocks=..., num_rows=10, schema={id: int64}) + + Returns: + A MaterializedDataset holding the materialized data blocks. + """ + copy = Dataset.copy(self, _deep_copy=True, _as=MaterializedDataset) + copy._plan.execute() + + bundle = copy._plan._snapshot_bundle + blocks_with_metadata = bundle.blocks + # TODO(hchen): Here we generate the same number of blocks as + # the original Dataset. Because the old code path does this, and + # some unit tests implicily depend on this behavior. + # After we remove the old code path, we should consider merging + # some blocks for better perf. + ref_bundles = [ + RefBundle( + blocks=[block_with_metadata], + owns_blocks=False, + ) + for block_with_metadata in blocks_with_metadata + ] + logical_plan = LogicalPlan(InputData(input_data=ref_bundles), self.context) + output = MaterializedDataset( + ExecutionPlan(copy._plan.stats()), + logical_plan, + ) + # Metrics are tagged with `copy`s uuid, update the output uuid with + # this so the user can access the metrics label. + output._set_name(copy._name) + output._set_uuid(copy._get_uuid()) + output._plan.execute() # No-op that marks the plan as fully executed. + return output + + @PublicAPI(api_group=IM_API_GROUP) + def stats(self) -> str: + """Returns a string containing execution timing information. + + Note that this does not trigger execution, so if the dataset has not yet + executed, an empty string is returned. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.range(10) + assert ds.stats() == "" + + ds = ds.materialize() + print(ds.stats()) + + .. 
testoutput:: + :options: +MOCK + + Operator 0 Read: 1 tasks executed, 5 blocks produced in 0s + * Remote wall time: 16.29us min, 7.29ms max, 1.21ms mean, 24.17ms total + * Remote cpu time: 16.0us min, 2.54ms max, 810.45us mean, 16.21ms total + * Peak heap memory usage (MiB): 137968.75 min, 142734.38 max, 139846 mean + * Output num rows: 0 min, 1 max, 0 mean, 10 total + * Output size bytes: 0 min, 8 max, 4 mean, 80 total + * Tasks per node: 20 min, 20 max, 20 mean; 1 nodes used + + """ + if self._current_executor: + return self._current_executor.get_stats().to_summary().to_string() + elif self._write_ds is not None and self._write_ds._plan.has_computed_output(): + return self._write_ds.stats() + return self._get_stats_summary().to_string() + + def _get_stats_summary(self) -> DatasetStatsSummary: + return self._plan.stats().to_summary() + + @ConsumptionAPI(pattern="Examples:") + @DeveloperAPI + def iter_internal_ref_bundles(self) -> Iterator[RefBundle]: + """Get an iterator over ``RefBundles`` + belonging to this Dataset. Calling this function doesn't keep + the data materialized in-memory. + + Examples: + >>> import ray + >>> ds = ray.data.range(1) + >>> for ref_bundle in ds.iter_internal_ref_bundles(): + ... for block_ref, block_md in ref_bundle.blocks: + ... block = ray.get(block_ref) + + Returns: + An iterator over this Dataset's ``RefBundles``. + """ + + iter_ref_bundles, _, _ = self._plan.execute_to_iterator() + self._synchronize_progress_bar() + return iter_ref_bundles + + @Deprecated + @ConsumptionAPI(pattern="Examples:") + def get_internal_block_refs(self) -> List[ObjectRef[Block]]: + """Get a list of references to the underlying blocks of this dataset. + + This function can be used for zero-copy access to the data. It blocks + until the underlying blocks are computed. + + Examples: + >>> import ray + >>> ds = ray.data.range(1) + >>> ds.get_internal_block_refs() + [ObjectRef(...)] + + Returns: + A list of references to this dataset's blocks. 
+ """ + logger.warning( + "`Dataset.get_internal_block_refs()` is deprecated. Use " + "`Dataset.iter_internal_ref_bundles()` instead.", + ) + block_refs = self._plan.execute().block_refs + self._synchronize_progress_bar() + return block_refs + + @DeveloperAPI + def has_serializable_lineage(self) -> bool: + """Whether this dataset's lineage is able to be serialized for storage and + later deserialized, possibly on a different cluster. + + Only datasets that are created from data that we know will still exist at + deserialization time, e.g. data external to this Ray cluster such as persistent + cloud object stores, support lineage-based serialization. All of the + ray.data.read_*() APIs support lineage-based serialization. + + Examples: + + >>> import ray + >>> ray.data.from_items(list(range(10))).has_serializable_lineage() + False + >>> ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv").has_serializable_lineage() + True + """ # noqa: E501 + return all( + op.is_lineage_serializable() + for op in self._logical_plan.dag.post_order_iter() + ) + + @DeveloperAPI + def serialize_lineage(self) -> bytes: + """ + Serialize this dataset's lineage, not the actual data or the existing data + futures, to bytes that can be stored and later deserialized, possibly on a + different cluster. + + Note that this uses pickle and will drop all computed data, and that everything + is recomputed from scratch after deserialization. + + Use :py:meth:`Dataset.deserialize_lineage` to deserialize the serialized + bytes returned from this method into a Dataset. + + .. note:: + Unioned and zipped datasets, produced by :py:meth`Dataset.union` and + :py:meth:`Dataset.zip`, are not lineage-serializable. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv") + serialized_ds = ds.serialize_lineage() + ds = ray.data.Dataset.deserialize_lineage(serialized_ds) + print(ds) + + .. 
testoutput:: + + Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + ) + + + Returns: + Serialized bytes containing the lineage of this dataset. + """ + if not self.has_serializable_lineage(): + raise ValueError( + "Lineage-based serialization is not supported for this stream, which " + "means that it cannot be used as a tunable hyperparameter. " + "Lineage-based serialization is explicitly NOT supported for unioned " + "or zipped datasets (see docstrings for those methods), and is only " + "supported for datasets created from data that we know will still " + "exist at deserialization time, e.g. external data in persistent cloud " + "object stores or in-memory data from long-lived clusters. Concretely, " + "all ray.data.read_*() APIs should support lineage-based " + "serialization, while all of the ray.data.from_*() APIs do not. To " + "allow this stream to be serialized to storage, write the data to an " + "external store (such as AWS S3, GCS, or Azure Blob Storage) using the " + "Dataset.write_*() APIs, and serialize a new dataset reading " + "from the external store using the ray.data.read_*() APIs." + ) + # Copy Dataset and clear the blocks from the execution plan so only the + # Dataset's lineage is serialized. + plan_copy = self._plan.deep_copy() + logical_plan_copy = copy.copy(self._plan._logical_plan) + ds = Dataset(plan_copy, logical_plan_copy) + ds._plan.clear_snapshot() + ds._set_uuid(self._get_uuid()) + + def _reduce_remote_fn(rf: ray.remote_function.RemoteFunction): + # Custom reducer for Ray remote function handles that allows for + # cross-cluster serialization. + # This manually unsets the last export session and job to force re-exporting + # of the function when the handle is deserialized on a new cluster. + # TODO(Clark): Fix this in core Ray, see issue: + # https://github.com/ray-project/ray/issues/24152. 
+ reconstructor, args, state = rf.__reduce__() + state["_last_export_session_and_job"] = None + return reconstructor, args, state + + context = ray._private.worker.global_worker.get_serialization_context() + try: + context._register_cloudpickle_reducer( + ray.remote_function.RemoteFunction, _reduce_remote_fn + ) + serialized = pickle.dumps(ds) + finally: + context._unregister_cloudpickle_reducer(ray.remote_function.RemoteFunction) + return serialized + + @staticmethod + @DeveloperAPI + def deserialize_lineage(serialized_ds: bytes) -> "Dataset": + """ + Deserialize the provided lineage-serialized Dataset. + + This uses pickle, and assumes that the provided serialized bytes were + serialized using :py:meth:`Dataset.serialize_lineage`. + + Examples: + + .. testcode:: + + import ray + + ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv") + serialized_ds = ds.serialize_lineage() + ds = ray.data.Dataset.deserialize_lineage(serialized_ds) + print(ds) + + .. testoutput:: + + Dataset( + num_rows=?, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + ) + + Args: + serialized_ds: The serialized Dataset that we wish to deserialize. + + Returns: + A deserialized ``Dataset`` instance. + """ + return pickle.loads(serialized_ds) + + @property + @DeveloperAPI + def context(self) -> DataContext: + """Return the DataContext used to create this Dataset.""" + return self._plan._context + + def _aggregate_on( + self, agg_cls: type, on: Optional[Union[str, List[str]]], *args, **kwargs + ): + """Helper for aggregating on a particular subset of the dataset. + + This validates the `on` argument, and converts a list of column names + or lambdas to a multi-aggregation. A null `on` results in a + multi-aggregation on all columns for an Arrow Dataset, and a single + aggregation on the entire row for a simple Dataset. 
+ """ + aggs = self._build_multicolumn_aggs(agg_cls, on, *args, **kwargs) + return self.aggregate(*aggs) + + def _build_multicolumn_aggs( + self, + agg_cls: type, + on: Optional[Union[str, List[str]]], + ignore_nulls: bool, + *args, + skip_cols: Optional[List[str]] = None, + **kwargs, + ): + """Build set of aggregations for applying a single aggregation to + multiple columns. + """ + # Expand None into an aggregation for each column. + if on is None: + schema = self.schema(fetch_if_missing=True) + if schema is not None and not isinstance(schema, type): + if not skip_cols: + skip_cols = [] + if len(schema.names) > 0: + on = [col for col in schema.names if col not in skip_cols] + + if not isinstance(on, list): + on = [on] + return [agg_cls(on_, *args, ignore_nulls=ignore_nulls, **kwargs) for on_ in on] + + def _aggregate_result(self, result: Union[Tuple, Mapping]) -> U: + if result is not None and len(result) == 1: + if isinstance(result, tuple): + return result[0] + else: + # NOTE (kfstorm): We cannot call `result[0]` directly on + # `PandasRow` because indexing a column with position is not + # supported by pandas. + return list(result.values())[0] + else: + return result + + @repr_with_fallback(["ipywidgets", "8"]) + def _repr_mimebundle_(self, **kwargs): + """Return a mimebundle with an ipywidget repr and a simple text repr. + + Depending on the frontend where the data is being displayed, + different mimetypes are used from this bundle. + See https://ipython.readthedocs.io/en/stable/config/integrating.html + for information about this method, and + https://ipywidgets.readthedocs.io/en/latest/embedding.html + for more information about the jupyter widget mimetype. + + Returns: + A mimebundle containing an ipywidget repr and a simple text repr. + """ + import ipywidgets + + title = ipywidgets.HTML(f"

{self.__class__.__name__}

") + tab = self._tab_repr_() + widget = ipywidgets.VBox([title, tab], layout=ipywidgets.Layout(width="100%")) + + # Get the widget mime bundle, but replace the plaintext + # with the Datastream repr + bundle = widget._repr_mimebundle_(**kwargs) + bundle.update( + { + "text/plain": repr(self), + } + ) + return bundle + + def _tab_repr_(self): + from ipywidgets import HTML, Tab + + metadata = { + "num_blocks": self._plan.initial_num_blocks(), + "num_rows": self._meta_count(), + } + # Show metadata if available, but don't trigger execution. + schema = self.schema(fetch_if_missing=False) + if schema is None: + schema_repr = Template("rendered_html_common.html.j2").render( + content="
Unknown schema
" + ) + elif isinstance(schema, type): + schema_repr = Template("rendered_html_common.html.j2").render( + content=f"
Data type: {html.escape(str(schema))}
" + ) + else: + schema_data = {} + for sname, stype in zip(schema.names, schema.types): + schema_data[sname] = getattr(stype, "__name__", str(stype)) + + schema_repr = Template("scrollableTable.html.j2").render( + table=tabulate( + tabular_data=schema_data.items(), + tablefmt="html", + showindex=False, + headers=["Name", "Type"], + ), + max_height="300px", + ) + + children = [] + children.append( + HTML( + Template("scrollableTable.html.j2").render( + table=tabulate( + tabular_data=metadata.items(), + tablefmt="html", + showindex=False, + headers=["Field", "Value"], + ), + max_height="300px", + ) + ) + ) + children.append(HTML(schema_repr)) + return Tab(children, titles=["Metadata", "Schema"]) + + def __repr__(self) -> str: + return self._plan.get_plan_as_string(self.__class__) + + def __str__(self) -> str: + return repr(self) + + def __bool__(self) -> bool: + # Prevents `__len__` from being called to check if it is None + # see: issue #25152 + return True + + def __len__(self) -> int: + raise AttributeError( + "Use `ds.count()` to compute the length of a distributed Dataset. " + "This may be an expensive operation." + ) + + def __iter__(self): + raise TypeError( + "`Dataset` objects aren't iterable. To iterate records, call " + "`ds.iter_rows()` or `ds.iter_batches()`. For more information, read " + "https://docs.ray.io/en/latest/data/iterating-over-data.html." 
+ ) + + def _block_num_rows(self) -> List[int]: + get_num_rows = cached_remote_fn(_get_num_rows) + num_rows = [] + for ref_bundle in self.iter_internal_ref_bundles(): + for block_ref in ref_bundle.block_refs: + num_rows.append(get_num_rows.remote(block_ref)) + return ray.get(num_rows) + + def _meta_count(self) -> Optional[int]: + return self._plan.meta_count() + + def _get_uuid(self) -> str: + return self._uuid + + def _set_uuid(self, uuid: str) -> None: + self._uuid = uuid + self._plan._dataset_uuid = uuid + self._plan._in_stats.dataset_uuid = uuid + + def _synchronize_progress_bar(self): + """Flush progress bar output by shutting down the current executor. + + This should be called at the end of all blocking APIs (e.g., `take`), but not + async APIs (e.g., `iter_batches`). + + The streaming executor runs in a separate generator / thread, so it is + possible the shutdown logic runs even after a call to retrieve rows from the + stream has finished. Explicit shutdown avoids this, which can clobber console + output (https://github.com/ray-project/ray/issues/32414). + """ + if self._current_executor: + self._current_executor.shutdown() + self._current_executor = None + + def __getstate__(self): + # Note: excludes _current_executor which is not serializable. + return { + "plan": self._plan, + "uuid": self._uuid, + "logical_plan": self._logical_plan, + } + + def __setstate__(self, state): + self._plan = state["plan"] + self._uuid = state["uuid"] + self._logical_plan = state["logical_plan"] + self._current_executor = None + + def __del__(self): + if not self._current_executor: + return + + # When Python shuts down, `ray` might evaluate to ``. + # This value is truthy and not `None`, so we use a try-catch in addition to + # `if ray is not None`. For more information, see #42382. 
+ try: + if ray is not None and ray.is_initialized(): + self._current_executor.shutdown() + except TypeError: + pass + + +@PublicAPI +class MaterializedDataset(Dataset, Generic[T]): + """A Dataset materialized in Ray memory, e.g., via `.materialize()`. + + The blocks of a MaterializedDataset object are materialized into Ray object store + memory, which means that this class can be shared or iterated over by multiple Ray + tasks without re-executing the underlying computations for producing the stream. + """ + + def num_blocks(self) -> int: + """Return the number of blocks of this :class:`MaterializedDataset`. + + Examples: + >>> import ray + >>> ds = ray.data.range(100).repartition(10).materialize() + >>> ds.num_blocks() + 10 + + Time complexity: O(1) + + Returns: + The number of blocks of this :class:`Dataset`. + """ + return self._plan.initial_num_blocks() + + +@PublicAPI(stability="beta") +class Schema: + """Dataset schema. + + Attributes: + base_schema: The underlying Arrow or Pandas schema. + """ + + def __init__( + self, + base_schema: Union["pyarrow.lib.Schema", "PandasBlockSchema"], + *, + data_context: Optional[DataContext] = None, + ): + self.base_schema = base_schema + + # Snapshot the current context, so that the config of Datasets is always + # determined by the config at the time it was created. + self._context = data_context or copy.deepcopy(DataContext.get_current()) + + @property + def names(self) -> List[str]: + """Lists the columns of this Dataset.""" + return self.base_schema.names + + @property + def types(self) -> List[Union[type[object], "pyarrow.lib.DataType"]]: + """Lists the types of this Dataset in Arrow format + + For non-Arrow compatible types, we return "object". 
+ """ + import pyarrow as pa + + from ray.data.extensions import ArrowTensorType, TensorDtype + + if isinstance(self.base_schema, pa.lib.Schema): + return list(self.base_schema.types) + + arrow_types = [] + for dtype in self.base_schema.types: + if isinstance(dtype, TensorDtype): + + if self._context.use_arrow_tensor_v2: + pa_tensor_type_class = ArrowTensorTypeV2 + else: + pa_tensor_type_class = ArrowTensorType + + # Manually convert our Pandas tensor extension type to Arrow. + arrow_types.append( + pa_tensor_type_class( + shape=dtype._shape, dtype=pa.from_numpy_dtype(dtype._dtype) + ) + ) + + else: + try: + arrow_types.append(pa.from_numpy_dtype(dtype)) + except pa.ArrowNotImplementedError: + arrow_types.append(object) + except Exception: + logger.exception(f"Error converting dtype {dtype} to Arrow.") + arrow_types.append(None) + return arrow_types + + def __eq__(self, other): + return ( + isinstance(other, Schema) + and other.types == self.types + and other.names == self.names + ) + + def __repr__(self): + column_width = max([len(name) for name in self.names] + [len("Column")]) + padding = 2 + + output = "Column" + output += " " * ((column_width + padding) - len("Column")) + output += "Type\n" + + output += "-" * len("Column") + output += " " * ((column_width + padding) - len("Column")) + output += "-" * len("Type") + "\n" + + for name, type in zip(self.names, self.types): + output += name + output += " " * ((column_width + padding) - len(name)) + output += f"{type}\n" + + output = output.rstrip() + return output + + +def _block_to_df(block: Block) -> "pandas.DataFrame": + block = BlockAccessor.for_block(block) + return block.to_pandas() + + +def _block_to_ndarray(block: Block, column: Optional[str]): + block = BlockAccessor.for_block(block) + return block.to_numpy(column) + + +def _block_to_arrow(block: Block): + block = BlockAccessor.for_block(block) + return block.to_arrow() diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/grouped_data.py 
b/infer_4_47_1/lib/python3.10/site-packages/ray/data/grouped_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8f7b7dde118ddbd54705b89c2589b9dd32f48dab --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/grouped_data.py @@ -0,0 +1,517 @@ +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +from ray.data._internal.aggregate import Count, Max, Mean, Min, Std, Sum +from ray.data._internal.compute import ComputeStrategy +from ray.data._internal.logical.interfaces import LogicalPlan +from ray.data._internal.logical.operators.all_to_all_operator import Aggregate +from ray.data.aggregate import AggregateFn +from ray.data.block import BlockAccessor, CallableClass, UserDefinedFunction +from ray.data.dataset import DataBatch, Dataset +from ray.util.annotations import PublicAPI + +CDS_API_GROUP = "Computations or Descriptive Stats" +FA_API_GROUP = "Function Application" + + +class _MultiColumnSortedKey: + """Represents a tuple of group keys with a ``__lt__`` method + + This is a simple implementation to support multi-column groupby. + While a 1D array of tuples suffices to maintain the lexicographical + sorted order, a comparison method is also needed in ``np.searchsorted`` + (for computing the group key boundaries). + """ + + __slots__ = ("data",) + + def __init__(self, *args): + self.data = tuple(args) + + def __lt__(self, obj: "_MultiColumnSortedKey") -> bool: + return self.data < obj.data + + def __repr__(self) -> str: + """Print as T(1, 2)""" + return "T" + self.data.__repr__() + + +class GroupedData: + """Represents a grouped dataset created by calling ``Dataset.groupby()``. + + The actual groupby is deferred until an aggregation is applied. + """ + + def __init__( + self, + dataset: Dataset, + key: Union[str, List[str]], + ): + """Construct a dataset grouped by key (internal API). + + The constructor is not part of the GroupedData API. + Use the ``Dataset.groupby()`` method to construct one. 
+ """ + self._dataset = dataset + self._key = key + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(dataset={self._dataset}, " f"key={self._key!r})" + ) + + @PublicAPI(api_group=FA_API_GROUP) + def aggregate(self, *aggs: AggregateFn) -> Dataset: + """Implements an accumulator-based aggregation. + + Args: + aggs: Aggregations to do. + + Returns: + The output is an dataset of ``n + 1`` columns where the first column + is the groupby key and the second through ``n + 1`` columns are the + results of the aggregations. + If groupby key is ``None`` then the key part of return is omitted. + """ + + plan = self._dataset._plan.copy() + op = Aggregate( + self._dataset._logical_plan.dag, + key=self._key, + aggs=aggs, + ) + logical_plan = LogicalPlan(op, self._dataset.context) + return Dataset( + plan, + logical_plan, + ) + + def _aggregate_on( + self, + agg_cls: type, + on: Union[str, List[str]], + ignore_nulls: bool, + *args, + **kwargs, + ): + """Helper for aggregating on a particular subset of the dataset. + + This validates the `on` argument, and converts a list of column names + to a multi-aggregation. A null `on` results in a + multi-aggregation on all columns for an Arrow Dataset, and a single + aggregation on the entire row for a simple Dataset. 
+ """ + aggs = self._dataset._build_multicolumn_aggs( + agg_cls, on, ignore_nulls, *args, skip_cols=self._key, **kwargs + ) + return self.aggregate(*aggs) + + @PublicAPI(api_group=FA_API_GROUP) + def map_groups( + self, + fn: UserDefinedFunction[DataBatch, DataBatch], + *, + compute: Union[str, ComputeStrategy] = None, + batch_format: Optional[str] = "default", + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + num_cpus: Optional[float] = None, + num_gpus: Optional[float] = None, + concurrency: Optional[Union[int, Tuple[int, int]]] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to each group of records of this dataset. + + While map_groups() is very flexible, note that it comes with downsides: + * It may be slower than using more specific methods such as min(), max(). + * It requires that each group fits in memory on a single node. + + In general, prefer to use aggregate() instead of map_groups(). + + .. warning:: + Specifying both ``num_cpus`` and ``num_gpus`` for map tasks is experimental, + and may result in scheduling or stability issues. Please + `report any issues `_ + to the Ray team. + + Examples: + >>> # Return a single record per group (list of multiple records in, + >>> # list of a single record out). + >>> import ray + >>> import pandas as pd + >>> import numpy as np + >>> # Get first value per group. + >>> ds = ray.data.from_items([ # doctest: +SKIP + ... {"group": 1, "value": 1}, + ... {"group": 1, "value": 2}, + ... {"group": 2, "value": 3}, + ... {"group": 2, "value": 4}]) + >>> ds.groupby("group").map_groups( # doctest: +SKIP + ... lambda g: {"result": np.array([g["value"][0]])}) + + >>> # Return multiple records per group (dataframe in, dataframe out). + >>> df = pd.DataFrame( + ... {"A": ["a", "a", "b"], "B": [1, 1, 3], "C": [4, 6, 5]} + ... 
) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> grouped = ds.groupby("A") # doctest: +SKIP + >>> grouped.map_groups( # doctest: +SKIP + ... lambda g: g.apply( + ... lambda c: c / g[c.name].sum() if c.name in ["B", "C"] else c + ... ) + ... ) # doctest: +SKIP + + Args: + fn: The function to apply to each group of records, or a class type + that can be instantiated to create such a callable. It takes as + input a batch of all records from a single group, and returns a + batch of zero or more records, similar to map_batches(). + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + fn_args: Arguments to `fn`. + fn_kwargs: Keyword arguments to `fn`. + fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. + You can only provide this if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor. + This can only be provided if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + num_cpus: The number of CPUs to reserve for each parallel map worker. + num_gpus: The number of GPUs to reserve for each parallel map worker. For + example, specify `num_gpus=1` to request 1 GPU for each parallel map + worker. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). 
+ + Returns: + The return type is determined by the return type of ``fn``, and the return + value is combined from results of all groups. + """ + # Globally sort records by key. + # Note that sort() will ensure that records of the same key partitioned + # into the same block. + if self._key is not None: + sorted_ds = self._dataset.sort(self._key) + else: + sorted_ds = self._dataset.repartition(1) + + def get_key_boundaries(block_accessor: BlockAccessor) -> List[int]: + """Compute block boundaries based on the key(s)""" + + import numpy as np + + # Get the keys of the batch in numpy array format + keys = block_accessor.to_numpy(self._key) + + if isinstance(keys, dict): + # For multiple keys, we generate a separate tuple column + convert_to_multi_column_sorted_key = np.vectorize(_MultiColumnSortedKey) + keys: np.ndarray = convert_to_multi_column_sorted_key(*keys.values()) + + boundaries = [] + start = 0 + while start < keys.size: + end = start + np.searchsorted(keys[start:], keys[start], side="right") + boundaries.append(end) + start = end + return boundaries + + # The batch is the entire block, because we have batch_size=None for + # map_batches() below. + def apply_udf_to_groups(udf, batch, *args, **kwargs): + block = BlockAccessor.batch_to_block(batch) + block_accessor = BlockAccessor.for_block(block) + if self._key: + boundaries = get_key_boundaries(block_accessor) + else: + boundaries = [block_accessor.num_rows()] + start = 0 + for end in boundaries: + group_block = block_accessor.slice(start, end) + group_block_accessor = BlockAccessor.for_block(group_block) + # Convert block of each group to batch format here, because the + # block format here can be different from batch format + # (e.g. block is Arrow format, and batch is NumPy format). 
+ group_batch = group_block_accessor.to_batch_format(batch_format) + applied = udf(group_batch, *args, **kwargs) + yield applied + start = end + + if isinstance(fn, CallableClass): + + class wrapped_fn: + def __init__(self, *args, **kwargs): + self.fn = fn(*args, **kwargs) + + def __call__(self, batch, *args, **kwargs): + yield from apply_udf_to_groups(self.fn, batch, *args, **kwargs) + + else: + + def wrapped_fn(batch, *args, **kwargs): + yield from apply_udf_to_groups(fn, batch, *args, **kwargs) + + # Change the name of the wrapped function so that users see the name of their + # function rather than `wrapped_fn` in the progress bar. + wrapped_fn.__name__ = fn.__name__ + + # Note we set batch_size=None here, so it will use the entire block as a batch, + # which ensures that each group will be contained within a batch in entirety. + return sorted_ds._map_batches_without_batch_size_validation( + wrapped_fn, + batch_size=None, + compute=compute, + batch_format=batch_format, + zero_copy_batch=False, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + num_cpus=num_cpus, + num_gpus=num_gpus, + concurrency=concurrency, + ray_remote_args_fn=None, + **ray_remote_args, + ) + + @PublicAPI(api_group=CDS_API_GROUP) + def count(self) -> Dataset: + """Compute count aggregation. + + Examples: + >>> import ray + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": x % 3, "B": x} for x in range(100)]).groupby( # doctest: +SKIP + ... "A").count() # doctest: +SKIP + + Returns: + A dataset of ``[k, v]`` columns where ``k`` is the groupby key and + ``v`` is the number of rows with that key. + If groupby key is ``None`` then the key part of return is omitted. + """ + return self.aggregate(Count()) + + @PublicAPI(api_group=CDS_API_GROUP) + def sum( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + r"""Compute grouped sum aggregation. 
+ + Examples: + >>> import ray + >>> ray.data.from_items([ # doctest: +SKIP + ... (i % 3, i, i**2) # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP + ... .sum(lambda x: x[2]) # doctest: +SKIP + >>> ray.data.range(100).groupby("id").sum() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .sum(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the sum; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The sum result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise sum column for each original column + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Sum, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def min( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped min aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").min() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... 
.min(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the min; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The min result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise min column for each original column in + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Min, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def max( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped max aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").max() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .max(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the max; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The max result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise max column for each original column in + the dataset. 
+ - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Max, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def mean( + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Dataset: + """Compute grouped mean aggregation. + + Examples: + >>> import ray + >>> ray.data.le(100).groupby("value").mean() # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .mean(["B", "C"]) # doctest: +SKIP + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the mean; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The mean result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise mean column for each original column + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Mean, on, ignore_nulls) + + @PublicAPI(api_group=CDS_API_GROUP) + def std( + self, + on: Union[str, List[str]] = None, + ddof: int = 1, + ignore_nulls: bool = True, + ) -> Dataset: + """Compute grouped standard deviation aggregation. 
+ + Examples: + >>> import ray + >>> ray.data.range(100).groupby("id").std(ddof=0) # doctest: +SKIP + >>> ray.data.from_items([ # doctest: +SKIP + ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP + ... for i in range(100)]) \ # doctest: +SKIP + ... .groupby("A") \ # doctest: +SKIP + ... .std(["B", "C"]) # doctest: +SKIP + + NOTE: This uses Welford's online method for an accumulator-style + computation of the standard deviation. This method was chosen due to + it's numerical stability, and it being computable in a single pass. + This may give different (but more accurate) results than NumPy, Pandas, + and sklearn, which use a less numerically stable two-pass algorithm. + See + https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + + Args: + on: a column name or a list of column names to aggregate. + ddof: Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the std; if ``False``, + if a null value is encountered, the output will be null. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The standard deviation result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dataset containing a groupby key column, + ``"k"``, and a column-wise std column for each original column in + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` + columns where the first column is the groupby key and the second + through ``n + 1`` columns are the results of the aggregations. + + If groupby key is ``None`` then the key part of return is omitted. + """ + return self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) + + +# Backwards compatibility alias. 
+GroupedDataset = GroupedData diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/preprocessor.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..9db73405a702cd3f7786c61b5a34d272e8b47db7 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/preprocessor.py @@ -0,0 +1,318 @@ +import abc +import base64 +import collections +import pickle +import warnings +from enum import Enum +from typing import TYPE_CHECKING, Any, Dict, Union + +from ray.air.util.data_batch_conversion import BatchFormat +from ray.util.annotations import DeveloperAPI, PublicAPI + +if TYPE_CHECKING: + import numpy as np + import pandas as pd + + from ray.air.data_batch_type import DataBatchType + from ray.data import Dataset + + +@PublicAPI(stability="beta") +class PreprocessorNotFittedException(RuntimeError): + """Error raised when the preprocessor needs to be fitted first.""" + + pass + + +@PublicAPI(stability="beta") +class Preprocessor(abc.ABC): + """Implements an ML preprocessing operation. + + Preprocessors are stateful objects that can be fitted against a Dataset and used + to transform both local data batches and distributed data. For example, a + Normalization preprocessor may calculate the mean and stdev of a field during + fitting, and uses these attributes to implement its normalization transform. + + Preprocessors can also be stateless and transform data without needed to be fitted. + For example, a preprocessor may simply remove a column, which does not require + any state to be fitted. + + If you are implementing your own Preprocessor sub-class, you should override the + following: + + * ``_fit`` if your preprocessor is stateful. Otherwise, set + ``_is_fittable=False``. + * ``_transform_pandas`` and/or ``_transform_numpy`` for best performance, + implement both. Otherwise, the data will be converted to the match the + implemented method. 
+ """ + + class FitStatus(str, Enum): + """The fit status of preprocessor.""" + + NOT_FITTABLE = "NOT_FITTABLE" + NOT_FITTED = "NOT_FITTED" + # Only meaningful for Chain preprocessors. + # At least one contained preprocessor in the chain preprocessor + # is fitted and at least one that can be fitted is not fitted yet. + # This is a state that show up if caller only interacts + # with the chain preprocessor through intended Preprocessor APIs. + PARTIALLY_FITTED = "PARTIALLY_FITTED" + FITTED = "FITTED" + + # Preprocessors that do not need to be fitted must override this. + _is_fittable = True + + def _check_has_fitted_state(self): + """Checks if the Preprocessor has fitted state. + + This is also used as an indiciation if the Preprocessor has been fit, following + convention from Ray versions prior to 2.6. + This allows preprocessors that have been fit in older versions of Ray to be + used to transform data in newer versions. + """ + + fitted_vars = [v for v in vars(self) if v.endswith("_")] + return bool(fitted_vars) + + def fit_status(self) -> "Preprocessor.FitStatus": + if not self._is_fittable: + return Preprocessor.FitStatus.NOT_FITTABLE + elif ( + hasattr(self, "_fitted") and self._fitted + ) or self._check_has_fitted_state(): + return Preprocessor.FitStatus.FITTED + else: + return Preprocessor.FitStatus.NOT_FITTED + + def fit(self, ds: "Dataset") -> "Preprocessor": + """Fit this Preprocessor to the Dataset. + + Fitted state attributes will be directly set in the Preprocessor. + + Calling it more than once will overwrite all previously fitted state: + ``preprocessor.fit(A).fit(B)`` is equivalent to ``preprocessor.fit(B)``. + + Args: + ds: Input dataset. + + Returns: + Preprocessor: The fitted Preprocessor with state attributes. + """ + fit_status = self.fit_status() + if fit_status == Preprocessor.FitStatus.NOT_FITTABLE: + # No-op as there is no state to be fitted. 
+ return self + + if fit_status in ( + Preprocessor.FitStatus.FITTED, + Preprocessor.FitStatus.PARTIALLY_FITTED, + ): + warnings.warn( + "`fit` has already been called on the preprocessor (or at least one " + "contained preprocessors if this is a chain). " + "All previously fitted state will be overwritten!" + ) + + fitted_ds = self._fit(ds) + self._fitted = True + return fitted_ds + + def fit_transform(self, ds: "Dataset") -> "Dataset": + """Fit this Preprocessor to the Dataset and then transform the Dataset. + + Calling it more than once will overwrite all previously fitted state: + ``preprocessor.fit_transform(A).fit_transform(B)`` + is equivalent to ``preprocessor.fit_transform(B)``. + + Args: + ds: Input Dataset. + + Returns: + ray.data.Dataset: The transformed Dataset. + """ + self.fit(ds) + return self.transform(ds) + + def transform(self, ds: "Dataset") -> "Dataset": + """Transform the given dataset. + + Args: + ds: Input Dataset. + + Returns: + ray.data.Dataset: The transformed Dataset. + + Raises: + PreprocessorNotFittedException: if ``fit`` is not called yet. + """ + fit_status = self.fit_status() + if fit_status in ( + Preprocessor.FitStatus.PARTIALLY_FITTED, + Preprocessor.FitStatus.NOT_FITTED, + ): + raise PreprocessorNotFittedException( + "`fit` must be called before `transform`, " + "or simply use fit_transform() to run both steps" + ) + transformed_ds = self._transform(ds) + return transformed_ds + + def transform_batch(self, data: "DataBatchType") -> "DataBatchType": + """Transform a single batch of data. + + The data will be converted to the format supported by the Preprocessor, + based on which ``_transform_*`` methods are implemented. + + Args: + data: Input data batch. + + Returns: + DataBatchType: + The transformed data batch. This may differ + from the input type depending on which ``_transform_*`` methods + are implemented. 
+ """ + fit_status = self.fit_status() + if fit_status in ( + Preprocessor.FitStatus.PARTIALLY_FITTED, + Preprocessor.FitStatus.NOT_FITTED, + ): + raise PreprocessorNotFittedException( + "`fit` must be called before `transform_batch`." + ) + return self._transform_batch(data) + + @DeveloperAPI + def _fit(self, ds: "Dataset") -> "Preprocessor": + """Sub-classes should override this instead of fit().""" + raise NotImplementedError() + + def _determine_transform_to_use(self) -> BatchFormat: + """Determine which batch format to use based on Preprocessor implementation. + + * If only `_transform_pandas` is implemented, then use ``pandas`` batch format. + * If only `_transform_numpy` is implemented, then use ``numpy`` batch format. + * If both are implemented, then use the Preprocessor defined preferred batch + format. + """ + + has_transform_pandas = ( + self.__class__._transform_pandas != Preprocessor._transform_pandas + ) + has_transform_numpy = ( + self.__class__._transform_numpy != Preprocessor._transform_numpy + ) + + if has_transform_numpy and has_transform_pandas: + return self.preferred_batch_format() + elif has_transform_numpy: + return BatchFormat.NUMPY + elif has_transform_pandas: + return BatchFormat.PANDAS + else: + raise NotImplementedError( + "None of `_transform_numpy` or `_transform_pandas` are implemented. " + "At least one of these transform functions must be implemented " + "for Preprocessor transforms." + ) + + def _transform(self, ds: "Dataset") -> "Dataset": + # TODO(matt): Expose `batch_size` or similar configurability. + # The default may be too small for some datasets and too large for others. + transform_type = self._determine_transform_to_use() + + # Our user-facing batch format should only be pandas or NumPy, other + # formats {arrow, simple} are internal. 
+ kwargs = self._get_transform_config() + if transform_type == BatchFormat.PANDAS: + return ds.map_batches( + self._transform_pandas, batch_format=BatchFormat.PANDAS, **kwargs + ) + elif transform_type == BatchFormat.NUMPY: + return ds.map_batches( + self._transform_numpy, batch_format=BatchFormat.NUMPY, **kwargs + ) + else: + raise ValueError( + "Invalid transform type returned from _determine_transform_to_use; " + f'"pandas" and "numpy" allowed, but got: {transform_type}' + ) + + def _get_transform_config(self) -> Dict[str, Any]: + """Returns kwargs to be passed to :meth:`ray.data.Dataset.map_batches`. + + This can be implemented by subclassing preprocessors. + """ + return {} + + def _transform_batch(self, data: "DataBatchType") -> "DataBatchType": + # For minimal install to locally import air modules + import numpy as np + import pandas as pd + + from ray.air.util.data_batch_conversion import ( + _convert_batch_type_to_numpy, + _convert_batch_type_to_pandas, + ) + + try: + import pyarrow + except ImportError: + pyarrow = None + + if not isinstance( + data, (pd.DataFrame, pyarrow.Table, collections.abc.Mapping, np.ndarray) + ): + raise ValueError( + "`transform_batch` is currently only implemented for Pandas " + "DataFrames, pyarrow Tables, NumPy ndarray and dictionary of " + f"ndarray. Got {type(data)}." 
+ ) + + transform_type = self._determine_transform_to_use() + + if transform_type == BatchFormat.PANDAS: + return self._transform_pandas(_convert_batch_type_to_pandas(data)) + elif transform_type == BatchFormat.NUMPY: + return self._transform_numpy(_convert_batch_type_to_numpy(data)) + + @DeveloperAPI + def _transform_pandas(self, df: "pd.DataFrame") -> "pd.DataFrame": + """Run the transformation on a data batch in a Pandas DataFrame format.""" + raise NotImplementedError() + + @DeveloperAPI + def _transform_numpy( + self, np_data: Union["np.ndarray", Dict[str, "np.ndarray"]] + ) -> Union["np.ndarray", Dict[str, "np.ndarray"]]: + """Run the transformation on a data batch in a NumPy ndarray format.""" + raise NotImplementedError() + + @classmethod + @DeveloperAPI + def preferred_batch_format(cls) -> BatchFormat: + """Batch format hint for upstream producers to try yielding best block format. + + The preferred batch format to use if both `_transform_pandas` and + `_transform_numpy` are implemented. Defaults to Pandas. + + Can be overriden by Preprocessor classes depending on which transform + path is the most optimal. + """ + return BatchFormat.PANDAS + + @DeveloperAPI + def serialize(self) -> str: + """Return this preprocessor serialized as a string. + Note: this is not a stable serialization format as it uses `pickle`. + """ + # Convert it to a plain string so that it can be included as JSON metadata + # in Trainer checkpoints. 
+ return base64.b64encode(pickle.dumps(self)).decode("ascii") + + @staticmethod + @DeveloperAPI + def deserialize(serialized: str) -> "Preprocessor": + """Load the original preprocessor serialized via `self.serialize()`.""" + return pickle.loads(base64.b64decode(serialized)) diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/data/random_access_dataset.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/random_access_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a24c6796f7ca6b3dcbb63b97a722882e7b0d4687 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/random_access_dataset.py @@ -0,0 +1,293 @@ +import bisect +import logging +import random +import time +from collections import defaultdict +from typing import TYPE_CHECKING, Any, List, Optional + +import numpy as np + +import ray +from ray.data._internal.execution.interfaces.ref_bundle import ( + _ref_bundles_iterator_to_block_refs_list, +) +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data.block import BlockAccessor +from ray.data.context import DataContext +from ray.types import ObjectRef +from ray.util.annotations import PublicAPI + +try: + import pyarrow as pa +except ImportError: + pa = None + +if TYPE_CHECKING: + from ray.data import Dataset + +logger = logging.getLogger(__name__) + + +@PublicAPI(stability="alpha") +class RandomAccessDataset: + """A class that provides distributed, random access to a Dataset. + + See: ``Dataset.to_random_access_dataset()``. + """ + + def __init__( + self, + ds: "Dataset", + key: str, + num_workers: int, + ): + """Construct a RandomAccessDataset (internal API). + + The constructor is a private API. Use ``ds.to_random_access_dataset()`` + to construct a RandomAccessDataset. 
+ """ + schema = ds.schema(fetch_if_missing=True) + if schema is None or isinstance(schema, type): + raise ValueError("RandomAccessDataset only supports Arrow-format blocks.") + + start = time.perf_counter() + logger.info("[setup] Indexing dataset by sort key.") + sorted_ds = ds.sort(key) + get_bounds = cached_remote_fn(_get_bounds) + bundles = sorted_ds.iter_internal_ref_bundles() + blocks = _ref_bundles_iterator_to_block_refs_list(bundles) + + logger.info("[setup] Computing block range bounds.") + bounds = ray.get([get_bounds.remote(b, key) for b in blocks]) + self._non_empty_blocks = [] + self._lower_bound = None + self._upper_bounds = [] + for i, b in enumerate(bounds): + if b: + self._non_empty_blocks.append(blocks[i]) + if self._lower_bound is None: + self._lower_bound = b[0] + self._upper_bounds.append(b[1]) + + logger.info("[setup] Creating {} random access workers.".format(num_workers)) + ctx = DataContext.get_current() + scheduling_strategy = ctx.scheduling_strategy + self._workers = [ + _RandomAccessWorker.options(scheduling_strategy=scheduling_strategy).remote( + key + ) + for _ in range(num_workers) + ] + ( + self._block_to_workers_map, + self._worker_to_blocks_map, + ) = self._compute_block_to_worker_assignments() + + logger.info( + "[setup] Worker to blocks assignment: {}".format(self._worker_to_blocks_map) + ) + ray.get( + [ + w.assign_blocks.remote( + { + i: self._non_empty_blocks[i] + for i in self._worker_to_blocks_map[w] + } + ) + for w in self._workers + ] + ) + + logger.info("[setup] Finished assigning blocks to workers.") + self._build_time = time.perf_counter() - start + + def _compute_block_to_worker_assignments(self): + # Return values. + block_to_workers: dict[int, List["ray.ActorHandle"]] = defaultdict(list) + worker_to_blocks: dict["ray.ActorHandle", List[int]] = defaultdict(list) + + # Aux data structures. 
+ loc_to_workers: dict[str, List["ray.ActorHandle"]] = defaultdict(list) + locs = ray.get([w.ping.remote() for w in self._workers]) + for i, loc in enumerate(locs): + loc_to_workers[loc].append(self._workers[i]) + block_locs = ray.experimental.get_object_locations(self._non_empty_blocks) + + # First, try to assign all blocks to all workers at its location. + for block_idx, block in enumerate(self._non_empty_blocks): + block_info = block_locs[block] + locs = block_info.get("node_ids", []) + for loc in locs: + for worker in loc_to_workers[loc]: + block_to_workers[block_idx].append(worker) + worker_to_blocks[worker].append(block_idx) + + # Randomly assign any leftover blocks to at least one worker. + # TODO: the load balancing here could be improved. + for block_idx, block in enumerate(self._non_empty_blocks): + if len(block_to_workers[block_idx]) == 0: + worker = random.choice(self._workers) + block_to_workers[block_idx].append(worker) + worker_to_blocks[worker].append(block_idx) + + return block_to_workers, worker_to_blocks + + def get_async(self, key: Any) -> ObjectRef[Any]: + """Asynchronously finds the record for a single key. + + Args: + key: The key of the record to find. + + Returns: + ObjectRef containing the record (in pydict form), or None if not found. + """ + block_index = self._find_le(key) + if block_index is None: + return ray.put(None) + return self._worker_for(block_index).get.remote(block_index, key) + + def multiget(self, keys: List[Any]) -> List[Optional[Any]]: + """Synchronously find the records for a list of keys. + + Args: + keys: List of keys to find the records for. + + Returns: + List of found records (in pydict form), or None for missing records. 
+ """ + batches = defaultdict(list) + for k in keys: + batches[self._find_le(k)].append(k) + futures = {} + for index, keybatch in batches.items(): + if index is None: + continue + fut = self._worker_for(index).multiget.remote( + [index] * len(keybatch), keybatch + ) + futures[index] = fut + results = {} + for i, fut in futures.items(): + keybatch = batches[i] + values = ray.get(fut) + for k, v in zip(keybatch, values): + results[k] = v + return [results.get(k) for k in keys] + + def stats(self) -> str: + """Returns a string containing access timing information.""" + stats = ray.get([w.stats.remote() for w in self._workers]) + total_time = sum(s["total_time"] for s in stats) + accesses = [s["num_accesses"] for s in stats] + blocks = [s["num_blocks"] for s in stats] + msg = "RandomAccessDataset:\n" + msg += "- Build time: {}s\n".format(round(self._build_time, 2)) + msg += "- Num workers: {}\n".format(len(stats)) + msg += "- Blocks per worker: {} min, {} max, {} mean\n".format( + min(blocks), max(blocks), int(sum(blocks) / len(blocks)) + ) + msg += "- Accesses per worker: {} min, {} max, {} mean\n".format( + min(accesses), max(accesses), int(sum(accesses) / len(accesses)) + ) + msg += "- Mean access time: {}us\n".format( + int(total_time / (1 + sum(accesses)) * 1e6) + ) + return msg + + def _worker_for(self, block_index: int): + return random.choice(self._block_to_workers_map[block_index]) + + def _find_le(self, x: Any) -> int: + i = bisect.bisect_left(self._upper_bounds, x) + if i >= len(self._upper_bounds) or x < self._lower_bound: + return None + return i + + +@ray.remote(num_cpus=0) +class _RandomAccessWorker: + def __init__(self, key_field): + self.blocks = None + self.key_field = key_field + self.num_accesses = 0 + self.total_time = 0 + + def assign_blocks(self, block_ref_dict): + self.blocks = {k: ray.get(ref) for k, ref in block_ref_dict.items()} + + def get(self, block_index, key): + start = time.perf_counter() + result = self._get(block_index, key) + 
self.total_time += time.perf_counter() - start + self.num_accesses += 1 + return result + + def multiget(self, block_indices, keys): + start = time.perf_counter() + block = self.blocks[block_indices[0]] + if len(set(block_indices)) == 1 and isinstance( + self.blocks[block_indices[0]], pa.Table + ): + # Fast path: use np.searchsorted for vectorized search on a single block. + # This is ~3x faster than the naive case. + block = self.blocks[block_indices[0]] + col = block[self.key_field] + indices = np.searchsorted(col, keys) + acc = BlockAccessor.for_block(block) + result = [acc._get_row(i) for i in indices] + # assert result == [self._get(i, k) for i, k in zip(block_indices, keys)] + else: + result = [self._get(i, k) for i, k in zip(block_indices, keys)] + self.total_time += time.perf_counter() - start + self.num_accesses += 1 + return result + + def ping(self): + return ray.get_runtime_context().get_node_id() + + def stats(self) -> dict: + return { + "num_blocks": len(self.blocks), + "num_accesses": self.num_accesses, + "total_time": self.total_time, + } + + def _get(self, block_index, key): + if block_index is None: + return None + block = self.blocks[block_index] + column = block[self.key_field] + if isinstance(block, pa.Table): + column = _ArrowListWrapper(column) + i = _binary_search_find(column, key) + if i is None: + return None + acc = BlockAccessor.for_block(block) + return acc._get_row(i) + + +def _binary_search_find(column, x): + i = bisect.bisect_left(column, x) + if i != len(column) and column[i] == x: + return i + return None + + +class _ArrowListWrapper: + def __init__(self, arrow_col): + self.arrow_col = arrow_col + + def __getitem__(self, i): + return self.arrow_col[i].as_py() + + def __len__(self): + return len(self.arrow_col) + + +def _get_bounds(block, key): + if len(block) == 0: + return None + b = (block[key][0], block[key][len(block) - 1]) + if isinstance(block, pa.Table): + b = (b[0].as_py(), b[1].as_py()) + return b diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/ray/data/read_api.py b/infer_4_47_1/lib/python3.10/site-packages/ray/data/read_api.py new file mode 100644 index 0000000000000000000000000000000000000000..d60a89858512b79347d07e6a8d763a2b7e29efa0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/data/read_api.py @@ -0,0 +1,3345 @@ +import collections +import logging +import os +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + List, + Literal, + Optional, + Tuple, + TypeVar, + Union, +) + +import numpy as np + +import ray +from ray._private.auto_init_hook import wrap_auto_init +from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray +from ray.data._internal.datasource.avro_datasource import AvroDatasource +from ray.data._internal.datasource.bigquery_datasource import BigQueryDatasource +from ray.data._internal.datasource.binary_datasource import BinaryDatasource +from ray.data._internal.datasource.csv_datasource import CSVDatasource +from ray.data._internal.datasource.delta_sharing_datasource import ( + DeltaSharingDatasource, +) +from ray.data._internal.datasource.hudi_datasource import HudiDatasource +from ray.data._internal.datasource.iceberg_datasource import IcebergDatasource +from ray.data._internal.datasource.image_datasource import ( + ImageDatasource, + ImageFileMetadataProvider, +) +from ray.data._internal.datasource.json_datasource import JSONDatasource +from ray.data._internal.datasource.lance_datasource import LanceDatasource +from ray.data._internal.datasource.mongo_datasource import MongoDatasource +from ray.data._internal.datasource.numpy_datasource import NumpyDatasource +from ray.data._internal.datasource.parquet_bulk_datasource import ParquetBulkDatasource +from ray.data._internal.datasource.parquet_datasource import ParquetDatasource +from ray.data._internal.datasource.range_datasource import RangeDatasource +from ray.data._internal.datasource.sql_datasource import SQLDatasource 
+from ray.data._internal.datasource.text_datasource import TextDatasource +from ray.data._internal.datasource.tfrecords_datasource import TFRecordDatasource +from ray.data._internal.datasource.torch_datasource import TorchDatasource +from ray.data._internal.datasource.webdataset_datasource import WebDatasetDatasource +from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder +from ray.data._internal.logical.operators.from_operators import ( + FromArrow, + FromBlocks, + FromItems, + FromNumpy, + FromPandas, +) +from ray.data._internal.logical.operators.read_operator import Read +from ray.data._internal.logical.optimizers import LogicalPlan +from ray.data._internal.plan import ExecutionPlan +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data._internal.stats import DatasetStats +from ray.data._internal.util import ( + _autodetect_parallelism, + get_table_block_metadata, + ndarray_to_block, + pandas_df_to_arrow_block, +) +from ray.data.block import Block, BlockAccessor, BlockExecStats, BlockMetadata +from ray.data.context import DataContext +from ray.data.dataset import Dataset, MaterializedDataset +from ray.data.datasource import ( + BaseFileMetadataProvider, + Connection, + Datasource, + PathPartitionFilter, +) +from ray.data.datasource.datasource import Reader +from ray.data.datasource.file_based_datasource import ( + _unwrap_arrow_serialization_workaround, +) +from ray.data.datasource.file_meta_provider import ( + DefaultFileMetadataProvider, + FastFileMetadataProvider, +) +from ray.data.datasource.parquet_meta_provider import ParquetMetadataProvider +from ray.data.datasource.partitioning import Partitioning +from ray.types import ObjectRef +from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy + +if TYPE_CHECKING: + import dask + import datasets + import mars + import modin + import pandas + import pyarrow + import pymongoarrow.api + 
import pyspark + import tensorflow as tf + import torch + from pyiceberg.expressions import BooleanExpression + from tensorflow_metadata.proto.v0 import schema_pb2 + + from ray.data._internal.datasource.tfrecords_datasource import TFXReadOptions + + +T = TypeVar("T") + +logger = logging.getLogger(__name__) + + +@DeveloperAPI +def from_blocks(blocks: List[Block]): + """Create a :class:`~ray.data.Dataset` from a list of blocks. + + This method is primarily used for testing. Unlike other methods like + :func:`~ray.data.from_pandas` and :func:`~ray.data.from_arrow`, this method + gaurentees that it won't modify the number of blocks. + + Args: + blocks: List of blocks to create the dataset from. + + Returns: + A :class:`~ray.data.Dataset` holding the blocks. + """ + block_refs = [ray.put(block) for block in blocks] + metadata = [BlockAccessor.for_block(block).get_metadata() for block in blocks] + from_blocks_op = FromBlocks(block_refs, metadata) + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromBlocks": metadata}, parent=None) + ) + logical_plan = LogicalPlan(from_blocks_op, execution_plan._context) + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + +@PublicAPI +def from_items( + items: List[Any], + *, + parallelism: int = -1, + override_num_blocks: Optional[int] = None, +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a list of local Python objects. + + Use this method to create small datasets from data that fits in memory. + + Examples: + + >>> import ray + >>> ds = ray.data.from_items([1, 2, 3, 4, 5]) + >>> ds + MaterializedDataset(num_blocks=..., num_rows=5, schema={item: int64}) + >>> ds.schema() + Column Type + ------ ---- + item int64 + + Args: + items: List of local Python objects. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + override_num_blocks: Override the number of output blocks from all read tasks. 
+ By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` holding the items. + """ + import builtins + + parallelism = _get_num_output_blocks(parallelism, override_num_blocks) + if parallelism == 0: + raise ValueError(f"parallelism must be -1 or > 0, got: {parallelism}") + + detected_parallelism, _, _ = _autodetect_parallelism( + parallelism, + ray.util.get_current_placement_group(), + DataContext.get_current(), + ) + # Truncate parallelism to number of items to avoid empty blocks. + detected_parallelism = min(len(items), detected_parallelism) + + if detected_parallelism > 0: + block_size, remainder = divmod(len(items), detected_parallelism) + else: + block_size, remainder = 0, 0 + # NOTE: We need to explicitly use the builtins range since we override range below, + # with the definition of ray.data.range. + blocks: List[ObjectRef[Block]] = [] + metadata: List[BlockMetadata] = [] + for i in builtins.range(detected_parallelism): + stats = BlockExecStats.builder() + builder = DelegatingBlockBuilder() + # Evenly distribute remainder across block slices while preserving record order. 
+ block_start = i * block_size + min(i, remainder) + block_end = (i + 1) * block_size + min(i + 1, remainder) + for j in builtins.range(block_start, block_end): + item = items[j] + if not isinstance(item, collections.abc.Mapping): + item = {"item": item} + builder.add(item) + block = builder.build() + blocks.append(ray.put(block)) + metadata.append( + BlockAccessor.for_block(block).get_metadata(exec_stats=stats.build()) + ) + + from_items_op = FromItems(blocks, metadata) + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromItems": metadata}, parent=None) + ) + logical_plan = LogicalPlan(from_items_op, execution_plan._context) + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + +@PublicAPI +def range( + n: int, + *, + parallelism: int = -1, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` from a range of integers [0..n). + + This function allows for easy creation of synthetic datasets for testing or + benchmarking :ref:`Ray Data `. + + Examples: + + >>> import ray + >>> ds = ray.data.range(10000) + >>> ds + Dataset(num_rows=10000, schema={id: int64}) + >>> ds.map(lambda row: {"id": row["id"] * 2}).take(4) + [{'id': 0}, {'id': 2}, {'id': 4}, {'id': 6}] + + Args: + n: The upper bound of the range of integers. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. 
+ + Returns: + A :class:`~ray.data.Dataset` producing the integers from the range 0 to n. + + .. seealso:: + + :meth:`~ray.data.range_tensor` + Call this method for creating synthetic datasets of tensor data. + + """ + datasource = RangeDatasource(n=n, block_format="arrow", column_name="id") + return read_datasource( + datasource, + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def range_tensor( + n: int, + *, + shape: Tuple = (1,), + parallelism: int = -1, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` tensors of the provided shape from range + [0...n]. + + This function allows for easy creation of synthetic tensor datasets for testing or + benchmarking :ref:`Ray Data `. + + Examples: + + >>> import ray + >>> ds = ray.data.range_tensor(1000, shape=(2, 2)) + >>> ds + Dataset(num_rows=1000, schema={data: numpy.ndarray(shape=(2, 2), dtype=int64)}) + >>> ds.map_batches(lambda row: {"data": row["data"] * 2}).take(2) + [{'data': array([[0, 0], + [0, 0]])}, {'data': array([[2, 2], + [2, 2]])}] + + Args: + n: The upper bound of the range of tensor records. + shape: The shape of each tensor in the dataset. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. 
+ + Returns: + A :class:`~ray.data.Dataset` producing the tensor data from range 0 to n. + + .. seealso:: + + :meth:`~ray.data.range` + Call this method to create synthetic datasets of integer data. + + """ + datasource = RangeDatasource( + n=n, block_format="tensor", column_name="data", tensor_shape=tuple(shape) + ) + return read_datasource( + datasource, + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +@wrap_auto_init +def read_datasource( + datasource: Datasource, + *, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **read_args, +) -> Dataset: + """Read a stream from a custom :class:`~ray.data.Datasource`. + + Args: + datasource: The :class:`~ray.data.Datasource` to read data from. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + read_args: Additional kwargs to pass to the :class:`~ray.data.Datasource` + implementation. + + Returns: + :class:`~ray.data.Dataset` that reads data from the :class:`~ray.data.Datasource`. 
+ """ # noqa: E501 + parallelism = _get_num_output_blocks(parallelism, override_num_blocks) + + ctx = DataContext.get_current() + + if ray_remote_args is None: + ray_remote_args = {} + + if not datasource.supports_distributed_reads: + ray_remote_args["scheduling_strategy"] = NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ) + + if "scheduling_strategy" not in ray_remote_args: + ray_remote_args["scheduling_strategy"] = ctx.scheduling_strategy + + datasource_or_legacy_reader = _get_datasource_or_legacy_reader( + datasource, + ctx, + read_args, + ) + + cur_pg = ray.util.get_current_placement_group() + requested_parallelism, _, inmemory_size = _autodetect_parallelism( + parallelism, + ctx.target_max_block_size, + DataContext.get_current(), + datasource_or_legacy_reader, + placement_group=cur_pg, + ) + + # TODO(hchen/chengsu): Remove the duplicated get_read_tasks call here after + # removing LazyBlockList code path. + read_tasks = datasource_or_legacy_reader.get_read_tasks(requested_parallelism) + import uuid + + stats = DatasetStats( + metadata={"Read": [read_task.metadata for read_task in read_tasks]}, + parent=None, + needs_stats_actor=True, + stats_uuid=uuid.uuid4(), + ) + read_op = Read( + datasource, + datasource_or_legacy_reader, + parallelism, + inmemory_size, + len(read_tasks) if read_tasks else 0, + ray_remote_args, + concurrency, + ) + execution_plan = ExecutionPlan(stats) + logical_plan = LogicalPlan(read_op, execution_plan._context) + return Dataset( + plan=execution_plan, + logical_plan=logical_plan, + ) + + +@PublicAPI(stability="alpha") +def read_mongo( + uri: str, + database: str, + collection: str, + *, + pipeline: Optional[List[Dict]] = None, + schema: Optional["pymongoarrow.api.Schema"] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **mongo_args, +) -> Dataset: + """Create a 
:class:`~ray.data.Dataset` from a MongoDB database. + + The data to read from is specified via the ``uri``, ``database`` and ``collection`` + of the MongoDB. The dataset is created from the results of executing + ``pipeline`` against the ``collection``. If ``pipeline`` is None, the entire + ``collection`` is read. + + .. tip:: + + For more details about these MongoDB concepts, see the following: + - URI: https://www.mongodb.com/docs/manual/reference/connection-string/ + - Database and Collection: https://www.mongodb.com/docs/manual/core/databases-and-collections/ + - Pipeline: https://www.mongodb.com/docs/manual/core/aggregation-pipeline/ + + To read the MongoDB in parallel, the execution of the pipeline is run on partitions + of the collection, with a Ray read task to handle a partition. Partitions are + created in an attempt to evenly distribute the documents into the specified number + of partitions. The number of partitions is determined by ``parallelism`` which can + be requested from this interface or automatically chosen if unspecified (see the + ``parallelism`` arg below). + + Examples: + >>> import ray + >>> from pymongoarrow.api import Schema # doctest: +SKIP + >>> ds = ray.data.read_mongo( # doctest: +SKIP + ... uri="mongodb://username:password@mongodb0.example.com:27017/?authSource=admin", # noqa: E501 + ... database="my_db", + ... collection="my_collection", + ... pipeline=[{"$match": {"col2": {"$gte": 0, "$lt": 100}}}, {"$sort": "sort_field"}], # noqa: E501 + ... schema=Schema({"col1": pa.string(), "col2": pa.int64()}), + ... override_num_blocks=10, + ... ) + + Args: + uri: The URI of the source MongoDB where the dataset is + read from. For the URI format, see details in the `MongoDB docs `_. + database: The name of the database hosted in the MongoDB. This database + must exist otherwise ValueError is raised. + collection: The name of the collection in the database. This collection + must exist otherwise ValueError is raised. 
+ pipeline: A `MongoDB pipeline `_, which is executed on the given collection + with results used to create Dataset. If None, the entire collection will + be read. + schema: The schema used to read the collection. If None, it'll be inferred from + the results of pipeline. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + mongo_args: kwargs passed to `aggregate_arrow_all() `_ in pymongoarrow in producing + Arrow-formatted results. + + Returns: + :class:`~ray.data.Dataset` producing rows from the results of executing the pipeline on the specified MongoDB collection. + + Raises: + ValueError: if ``database`` doesn't exist. + ValueError: if ``collection`` doesn't exist. 
+ """ + datasource = MongoDatasource( + uri=uri, + database=database, + collection=collection, + pipeline=pipeline, + schema=schema, + **mongo_args, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="alpha") +def read_bigquery( + project_id: str, + dataset: Optional[str] = None, + query: Optional[str] = None, + *, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a dataset from BigQuery. + + The data to read from is specified via the ``project_id``, ``dataset`` + and/or ``query`` parameters. The dataset is created from the results of + executing ``query`` if a query is provided. Otherwise, the entire + ``dataset`` is read. + + For more information about BigQuery, see the following concepts: + + - Project id: `Creating and Managing Projects `_ + + - Dataset: `Datasets Intro `_ + + - Query: `Query Syntax `_ + + This method uses the BigQuery Storage Read API which reads in parallel, + with a Ray read task to handle each stream. The number of streams is + determined by ``parallelism`` which can be requested from this interface + or automatically chosen if unspecified (see the ``parallelism`` arg below). + + .. warning:: + The maximum query response size is 10GB. For more information, see `BigQuery response too large to return `_. + + Examples: + .. testcode:: + :skipif: True + + import ray + # Users will need to authenticate beforehand (https://cloud.google.com/sdk/gcloud/reference/auth/login) + ds = ray.data.read_bigquery( + project_id="my_project", + query="SELECT * FROM `bigquery-public-data.samples.gsod` LIMIT 1000", + ) + + Args: + project_id: The name of the associated Google Cloud Project that hosts the dataset to read. + For more information, see `Creating and Managing Projects `_. 
+ dataset: The name of the dataset hosted in BigQuery in the format of ``dataset_id.table_id``. + Both the dataset_id and table_id must exist otherwise an exception will be raised. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to ray.remote in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + Dataset producing rows from the results of executing the query (or reading the entire dataset) + on the specified BigQuery dataset. 
+ """ # noqa: E501 + datasource = BigQueryDatasource(project_id=project_id, dataset=dataset, query=query) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_parquet( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + columns: Optional[List[str]] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + tensor_column_schema: Optional[Dict[str, Tuple[np.dtype, Tuple[int, ...]]]] = None, + meta_provider: Optional[ParquetMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Optional[Partitioning] = Partitioning("hive"), + shuffle: Union[Literal["files"], None] = None, + include_paths: bool = False, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **arrow_parquet_args, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` from parquet files. + + + Examples: + Read a file in remote storage. + + >>> import ray + >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet") + >>> ds.schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + petal.length double + petal.width double + variety string + + Read a directory in remote storage. + + >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris-parquet/") + + Read multiple local files. + + >>> ray.data.read_parquet( + ... ["local:///path/to/file1", "local:///path/to/file2"]) # doctest: +SKIP + + Specify a schema for the parquet file. + + >>> import pyarrow as pa + >>> fields = [("sepal.length", pa.float32()), + ... ("sepal.width", pa.float32()), + ... ("petal.length", pa.float32()), + ... ("petal.width", pa.float32()), + ... 
("variety", pa.string())] + >>> ds = ray.data.read_parquet("s3://anonymous@ray-example-data/iris.parquet", + ... schema=pa.schema(fields)) + >>> ds.schema() + Column Type + ------ ---- + sepal.length float + sepal.width float + petal.length float + petal.width float + variety string + + The Parquet reader also supports projection and filter pushdown, allowing column + selection and row filtering to be pushed down to the file scan. + + .. testcode:: + + import pyarrow as pa + + # Create a Dataset by reading a Parquet file, pushing column selection and + # row filtering down to the file scan. + ds = ray.data.read_parquet( + "s3://anonymous@ray-example-data/iris.parquet", + columns=["sepal.length", "variety"], + filter=pa.dataset.field("sepal.length") > 5.0, + ) + + ds.show(2) + + .. testoutput:: + + {'sepal.length': 5.1, 'variety': 'Setosa'} + {'sepal.length': 5.4, 'variety': 'Setosa'} + + For further arguments you can pass to PyArrow as a keyword argument, see the + `PyArrow API reference `_. + + Args: + paths: A single file path or directory, or a list of file paths. Multiple + directories are not supported. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `pyarrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the ``S3FileSystem`` is + used. If ``None``, this function uses a system-chosen implementation. + columns: A list of column names to read. Only the specified columns are + read during the file scan. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. 
+ tensor_column_schema: A dict of column name to PyArrow dtype and shape + mappings for converting a Parquet column containing serialized + tensors (ndarrays) as their elements to PyArrow tensors. This function + assumes that the tensors are serialized in the raw + NumPy array format in C-contiguous order (e.g., via + `arr.tobytes()`). + meta_provider: A :ref:`file metadata provider `. Custom + metadata providers may be able to resolve file metadata more quickly and/or + accurately. In most cases you do not need to set this parameter. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. Use + with a custom callback to read only selected partitions of a dataset. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to HIVE partitioning. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + arrow_parquet_args: Other parquet read options to pass to PyArrow. For the full + set of arguments, see the `PyArrow API `_ + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing records read from the specified parquet + files. 
+ """ + _emit_meta_provider_deprecation_warning(meta_provider) + _validate_shuffle_arg(shuffle) + + if meta_provider is None: + meta_provider = ParquetMetadataProvider() + arrow_parquet_args = _resolve_parquet_args( + tensor_column_schema, + **arrow_parquet_args, + ) + + dataset_kwargs = arrow_parquet_args.pop("dataset_kwargs", None) + _block_udf = arrow_parquet_args.pop("_block_udf", None) + schema = arrow_parquet_args.pop("schema", None) + datasource = ParquetDatasource( + paths, + columns=columns, + dataset_kwargs=dataset_kwargs, + to_batch_kwargs=arrow_parquet_args, + _block_udf=_block_udf, + filesystem=filesystem, + schema=schema, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="beta") +def read_images( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + meta_provider: Optional[BaseFileMetadataProvider] = None, + ray_remote_args: Dict[str, Any] = None, + arrow_open_file_args: Optional[Dict[str, Any]] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = None, + size: Optional[Tuple[int, int]] = None, + mode: Optional[str] = None, + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = ImageDatasource._FILE_EXTENSIONS, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` from image files. 
+ + Examples: + >>> import ray + >>> path = "s3://anonymous@ray-example-data/batoidea/JPEGImages/" + >>> ds = ray.data.read_images(path) + >>> ds.schema() + Column Type + ------ ---- + image numpy.ndarray(shape=(32, 32, 3), dtype=uint8) + + If you need image file paths, set ``include_paths=True``. + + >>> ds = ray.data.read_images(path, include_paths=True) + >>> ds.schema() + Column Type + ------ ---- + image numpy.ndarray(shape=(32, 32, 3), dtype=uint8) + path string + >>> ds.take(1)[0]["path"] + 'ray-example-data/batoidea/JPEGImages/1.jpeg' + + If your images are arranged like: + + .. code:: + + root/dog/xxx.png + root/dog/xxy.png + + root/cat/123.png + root/cat/nsdf3.png + + Then you can include the labels by specifying a + :class:`~ray.data.datasource.partitioning.Partitioning`. + + >>> import ray + >>> from ray.data.datasource.partitioning import Partitioning + >>> root = "s3://anonymous@ray-example-data/image-datasets/dir-partitioned" + >>> partitioning = Partitioning("dir", field_names=["class"], base_dir=root) + >>> ds = ray.data.read_images(root, size=(224, 224), partitioning=partitioning) + >>> ds.schema() + Column Type + ------ ---- + image numpy.ndarray(shape=(224, 224, 3), dtype=uint8) + class string + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + filesystem: The pyarrow filesystem + implementation to read from. These filesystems are specified in the + `pyarrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + meta_provider: [Deprecated] A :ref:`file metadata provider `. 
+ Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + arrow_open_file_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. Use + with a custom callback to read only selected partitions of a dataset. + By default, this filters out any file paths whose file extension does not + match ``*.png``, ``*.jpg``, ``*.jpeg``, ``*.tiff``, ``*.bmp``, or ``*.gif``. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to ``None``. + size: The desired height and width of loaded images. If unspecified, images + retain their original shape. + mode: A `Pillow mode `_ + describing the desired type and depth of pixels. If unspecified, image + modes are inferred by + `Pillow `_. + include_paths: If ``True``, include the path to each image. File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file/directory paths in ``paths`` + that are not found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. 
+ By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` producing tensors that represent the images at + the specified paths. For information on working with tensors, read the + :ref:`tensor data guide `. + + Raises: + ValueError: if ``size`` contains non-positive numbers. + ValueError: if ``mode`` is unsupported. + """ + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = ImageFileMetadataProvider() + + datasource = ImageDatasource( + paths, + size=size, + mode=mode, + include_paths=include_paths, + filesystem=filesystem, + meta_provider=meta_provider, + open_stream_args=arrow_open_file_args, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@Deprecated +def read_parquet_bulk( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + columns: Optional[List[str]] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + arrow_open_file_args: Optional[Dict[str, Any]] = None, + tensor_column_schema: Optional[Dict[str, Tuple[np.dtype, Tuple[int, ...]]]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + shuffle: Union[Literal["files"], None] = None, + include_paths: bool = False, + file_extensions: Optional[List[str]] = ParquetBulkDatasource._FILE_EXTENSIONS, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **arrow_parquet_args, +) -> Dataset: + """Create :class:`~ray.data.Dataset` from parquet files 
without reading metadata. + + Use :meth:`~ray.data.read_parquet` for most cases. + + Use :meth:`~ray.data.read_parquet_bulk` if all the provided paths point to files + and metadata fetching using :meth:`~ray.data.read_parquet` takes too long or the + parquet files do not all have a unified schema. + + Performance slowdowns are possible when using this method with parquet files that + are very large. + + .. warning:: + + Only provide file paths as input (i.e., no directory paths). An + OSError is raised if one or more paths point to directories. If your + use-case requires directory paths, use :meth:`~ray.data.read_parquet` + instead. + + Examples: + Read multiple local files. You should always provide only input file paths + (i.e. no directory paths) when known to minimize read latency. + + >>> ray.data.read_parquet_bulk( # doctest: +SKIP + ... ["/path/to/file1", "/path/to/file2"]) + + Args: + paths: A single file path or a list of file paths. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are + specified in the + `PyArrow docs `_. + Specify this parameter if you need to provide specific configurations to + the filesystem. By default, the filesystem is automatically selected based + on the scheme of the paths. For example, if the path begins with ``s3://``, + the `S3FileSystem` is used. + columns: A list of column names to read. Only the + specified columns are read during the file scan. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + arrow_open_file_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + tensor_column_schema: A dict of column name to PyArrow dtype and shape + mappings for converting a Parquet column containing serialized + tensors (ndarrays) as their elements to PyArrow tensors. 
This function + assumes that the tensors are serialized in the raw + NumPy array format in C-contiguous order (e.g. via + `arr.tobytes()`). + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. Use + with a custom callback to read only selected partitions of a dataset. + By default, this filters out any file paths whose file extension does not + match "*.parquet*". + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + arrow_parquet_args: Other parquet read options to pass to PyArrow. For the full + set of arguments, see + the `PyArrow API `_ + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing records read from the specified paths. + """ + _emit_meta_provider_deprecation_warning(meta_provider) + + warnings.warn( + "`read_parquet_bulk` is deprecated and will be removed after May 2025. 
Use " + "`read_parquet` instead.", + DeprecationWarning, + ) + + if meta_provider is None: + meta_provider = FastFileMetadataProvider() + read_table_args = _resolve_parquet_args( + tensor_column_schema, + **arrow_parquet_args, + ) + if columns is not None: + read_table_args["columns"] = columns + + datasource = ParquetBulkDatasource( + paths, + read_table_args=read_table_args, + filesystem=filesystem, + open_stream_args=arrow_open_file_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_json( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = Partitioning("hive"), + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = JSONDatasource._FILE_EXTENSIONS, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **arrow_json_args, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` from JSON and JSONL files. + + For JSON file, the whole file is read as one row. + For JSONL file, each line of file is read as separate row. + + Examples: + Read a JSON file in remote storage. + + >>> import ray + >>> ds = ray.data.read_json("s3://anonymous@ray-example-data/log.json") + >>> ds.schema() + Column Type + ------ ---- + timestamp timestamp[...] + size int64 + + Read a JSONL file in remote storage. 
+ + >>> ds = ray.data.read_json("s3://anonymous@ray-example-data/train.jsonl") + >>> ds.schema() + Column Type + ------ ---- + input string + + Read multiple local files. + + >>> ray.data.read_json( # doctest: +SKIP + ... ["local:///path/to/file1", "local:///path/to/file2"]) + + Read multiple directories. + + >>> ray.data.read_json( # doctest: +SKIP + ... ["s3://bucket/path1", "s3://bucket/path2"]) + + By default, :meth:`~ray.data.read_json` parses + `Hive-style partitions `_ + from file paths. If your data adheres to a different partitioning scheme, set + the ``partitioning`` parameter. + + >>> ds = ray.data.read_json("s3://anonymous@ray-example-data/year=2022/month=09/sales.json") + >>> ds.take(1) + [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'}] + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `PyArrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. 
+ Use with a custom callback to read only selected partitions of a + dataset. + By default, this filters out any file paths whose file extension does not + match "*.json" or "*.jsonl". + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. By default, this function parses + `Hive-style partitions `_. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + arrow_json_args: JSON read options to pass to `pyarrow.json.read_json `_. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing records read from the specified paths. 
+ """ # noqa: E501 + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = JSONDatasource( + paths, + arrow_json_args=arrow_json_args, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_csv( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = Partitioning("hive"), + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **arrow_csv_args, +) -> Dataset: + """Creates a :class:`~ray.data.Dataset` from CSV files. + + Examples: + Read a file in remote storage. + + >>> import ray + >>> ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris.csv") + >>> ds.schema() + Column Type + ------ ---- + sepal length (cm) double + sepal width (cm) double + petal length (cm) double + petal width (cm) double + target int64 + + Read multiple local files. + + >>> ray.data.read_csv( # doctest: +SKIP + ... ["local:///path/to/file1", "local:///path/to/file2"]) + + Read a directory from remote storage. 
+ + >>> ds = ray.data.read_csv("s3://anonymous@ray-example-data/iris-csv/") + + Read files that use a different delimiter. For more uses of ParseOptions see + https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html # noqa: #501 + + >>> from pyarrow import csv + >>> parse_options = csv.ParseOptions(delimiter="\\t") + >>> ds = ray.data.read_csv( + ... "s3://anonymous@ray-example-data/iris.tsv", + ... parse_options=parse_options) + >>> ds.schema() + Column Type + ------ ---- + sepal.length double + sepal.width double + petal.length double + petal.width double + variety string + + Convert a date column with a custom format from a CSV file. For more uses of ConvertOptions see https://arrow.apache.org/docs/python/generated/pyarrow.csv.ConvertOptions.html # noqa: #501 + + >>> from pyarrow import csv + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y"]) + >>> ds = ray.data.read_csv( + ... "s3://anonymous@ray-example-data/dow_jones.csv", + ... convert_options=convert_options) + + By default, :meth:`~ray.data.read_csv` parses + `Hive-style partitions `_ + from file paths. If your data adheres to a different partitioning scheme, set + the ``partitioning`` parameter. + + >>> ds = ray.data.read_csv("s3://anonymous@ray-example-data/year=2022/month=09/sales.csv") + >>> ds.take(1) + [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'}] + + By default, :meth:`~ray.data.read_csv` reads all files from file paths. If you want to filter + files by file extensions, set the ``file_extensions`` parameter. + + Read only ``*.csv`` files from a directory. + + >>> ray.data.read_csv("s3://anonymous@ray-example-data/different-extensions/", + ... file_extensions=["csv"]) + Dataset(num_rows=?, schema={a: int64, b: int64}) + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + filesystem: The PyArrow filesystem + implementation to read from. 
These filesystems are specified in the + `pyarrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. + Use with a custom callback to read only selected partitions of a + dataset. By default, no files are filtered. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. By default, this function parses + `Hive-style partitions `_. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + arrow_csv_args: CSV read options to pass to + `pyarrow.csv.open_csv `_ + when opening CSV files. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. 
By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing records read from the specified paths. + """ + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = CSVDatasource( + paths, + arrow_csv_args=arrow_csv_args, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_text( + paths: Union[str, List[str]], + *, + encoding: str = "utf-8", + drop_empty_lines: bool = True, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + ray_remote_args: Optional[Dict[str, Any]] = None, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = None, + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from lines stored in text files. + + Examples: + Read a file in remote storage. 
+ + >>> import ray + >>> ds = ray.data.read_text("s3://anonymous@ray-example-data/this.txt") + >>> ds.schema() + Column Type + ------ ---- + text string + + Read multiple local files. + + >>> ray.data.read_text( # doctest: +SKIP + ... ["local:///path/to/file1", "local:///path/to/file2"]) + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + encoding: The encoding of the files (e.g., "utf-8" or "ascii"). + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `PyArrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks and + in the subsequent text decoding map task. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. + Use with a custom callback to read only selected partitions of a + dataset. By default, no files are filtered. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to ``None``. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. 
+ ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing lines of text read from the specified + paths. + """ + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = TextDatasource( + paths, + drop_empty_lines=drop_empty_lines, + encoding=encoding, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_avro( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + ray_remote_args: Optional[Dict[str, Any]] = None, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + 
partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = None, + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from records stored in Avro files. + + Examples: + Read an Avro file in remote storage or local storage. + + >>> import ray + >>> ds = ray.data.read_avro("s3://anonymous@ray-example-data/mnist.avro") + >>> ds.schema() + Column Type + ------ ---- + features list + label int64 + dataType string + + >>> ray.data.read_avro( # doctest: +SKIP + ... ["local:///path/to/file1", "local:///path/to/file2"]) + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `PyArrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks and + in the subsequent text decoding map task. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. 
+ partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. + Use with a custom callback to read only selected partitions of a + dataset. By default, no files are filtered. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to ``None``. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` holding records from the Avro files. 
+ """ + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = AvroDatasource( + paths, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_numpy( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = None, + include_paths: bool = False, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = NumpyDatasource._FILE_EXTENSIONS, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + **numpy_load_args, +) -> Dataset: + """Create an Arrow dataset from numpy files. + + Examples: + Read a directory of files in remote storage. + + >>> import ray + >>> ray.data.read_numpy("s3://bucket/path") # doctest: +SKIP + + Read multiple local files. + + >>> ray.data.read_numpy(["/path/to/file1", "/path/to/file2"]) # doctest: +SKIP + + Read multiple directories. + + >>> ray.data.read_numpy( # doctest: +SKIP + ... ["s3://bucket/path1", "s3://bucket/path2"]) + + Args: + paths: A single file/directory path or a list of file/directory paths. + A list of paths can contain both files and directories. + filesystem: The filesystem implementation to read from. 
+ parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_stream `_. + numpy_load_args: Other options to pass to np.load. + meta_provider: File metadata provider. Custom metadata providers may + be able to resolve file metadata more quickly and/or accurately. If + ``None``, this function uses a system-chosen implementation. + partition_filter: Path-based partition filter, if any. Can be used + with a custom callback to read only selected partitions of a dataset. + By default, this filters out any file paths whose file extension does not + match "*.npy*". + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to ``None``. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + Dataset holding Tensor records read from the specified paths. 
+ """ # noqa: E501 + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = NumpyDatasource( + paths, + numpy_load_args=numpy_load_args, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="alpha") +def read_tfrecords( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + include_paths: bool = False, + ignore_missing_paths: bool = False, + tf_schema: Optional["schema_pb2.Schema"] = None, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, + tfx_read_options: Optional["TFXReadOptions"] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from TFRecord files that contain + `tf.train.Example `_ + messages. + + .. tip:: + Using the ``tfx-bsl`` library is more performant when reading large + datasets (for example, in production use cases). To use this + implementation, you must first install ``tfx-bsl``: + + 1. `pip install tfx_bsl --no-dependencies` + 2. Pass tfx_read_options to read_tfrecords, for example: + `ds = read_tfrecords(path, ..., tfx_read_options=TFXReadOptions())` + + .. warning:: + This function exclusively supports ``tf.train.Example`` messages. 
If a file + contains a message that isn't of type ``tf.train.Example``, then this function + fails. + + Examples: + >>> import ray + >>> ray.data.read_tfrecords("s3://anonymous@ray-example-data/iris.tfrecords") + Dataset( + num_rows=?, + schema={...} + ) + + We can also read compressed TFRecord files, which use one of the + `compression types supported by Arrow `_: + + >>> ray.data.read_tfrecords( + ... "s3://anonymous@ray-example-data/iris.tfrecords.gz", + ... arrow_open_stream_args={"compression": "gzip"}, + ... ) + Dataset( + num_rows=?, + schema={...} + ) + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `PyArrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + when opening input files to read. To read a compressed TFRecord file, + pass the corresponding compression type (e.g., for ``GZIP`` or ``ZLIB``), + use ``arrow_open_stream_args={'compression': 'gzip'}``). + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. + Use with a custom callback to read only selected partitions of a + dataset. + include_paths: If ``True``, include the path to each file. 
File paths are + stored in the ``'path'`` column. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. + tf_schema: Optional TensorFlow Schema which is used to explicitly set the schema + of the underlying Dataset. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + tfx_read_options: Specifies read options when reading TFRecord files with TFX. + When no options are provided, the default version without tfx-bsl will + be used to read the tfrecords. + Returns: + A :class:`~ray.data.Dataset` that contains the example features. + + Raises: + ValueError: If a file contains a message that isn't a ``tf.train.Example``. + """ + import platform + + _emit_meta_provider_deprecation_warning(meta_provider) + + tfx_read = False + + if tfx_read_options and platform.processor() != "arm": + try: + import tfx_bsl # noqa: F401 + + tfx_read = True + except ModuleNotFoundError: + # override the tfx_read_options given that tfx-bsl is not installed + tfx_read_options = None + logger.warning( + "Please install tfx-bsl package with" + " `pip install tfx_bsl --no-dependencies`." + " This can help speed up the reading of large TFRecord files." 
+ ) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + datasource = TFRecordDatasource( + paths, + tf_schema=tf_schema, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + tfx_read_options=tfx_read_options, + ) + ds = read_datasource( + datasource, + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + if ( + tfx_read_options + and tfx_read_options.auto_infer_schema + and tfx_read + and not tf_schema + ): + from ray.data._internal.datasource.tfrecords_datasource import ( + _infer_schema_and_transform, + ) + + return _infer_schema_and_transform(ds) + + return ds + + +@PublicAPI(stability="alpha") +def read_webdataset( + paths: Union[str, List[str]], + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + decoder: Optional[Union[bool, str, callable, list]] = True, + fileselect: Optional[Union[list, callable]] = None, + filerename: Optional[Union[list, callable]] = None, + suffixes: Optional[Union[list, callable]] = None, + verbose_open: bool = False, + shuffle: Union[Literal["files"], None] = None, + include_paths: bool = False, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from + `WebDataset `_ files. + + Args: + paths: A single file/directory path or a list of file/directory paths. + A list of paths can contain both files and directories. + filesystem: The filesystem implementation to read from. 
+ parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + arrow_open_stream_args: Key-word arguments passed to + `pyarrow.fs.FileSystem.open_input_stream `_. + To read a compressed TFRecord file, + pass the corresponding compression type (e.g. for ``GZIP`` or ``ZLIB``, use + ``arrow_open_stream_args={'compression': 'gzip'}``). + meta_provider: File metadata provider. Custom metadata providers may + be able to resolve file metadata more quickly and/or accurately. If + ``None``, this function uses a system-chosen implementation. + partition_filter: Path-based partition filter, if any. Can be used + with a custom callback to read only selected partitions of a dataset. + decoder: A function or list of functions to decode the data. + fileselect: A callable or list of glob patterns to select files. + filerename: A function or list of tuples to rename files prior to grouping. + suffixes: A function or list of suffixes to select for creating samples. + verbose_open: Whether to print the file names as they are opened. + shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` that contains the example features. 
+ + Raises: + ValueError: If a file contains a message that isn't a `tf.train.Example`_. + + .. _tf.train.Example: https://www.tensorflow.org/api_docs/python/tf/train/Example + """ # noqa: E501 + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = WebDatasetDatasource( + paths, + decoder=decoder, + fileselect=fileselect, + filerename=filerename, + suffixes=suffixes, + verbose_open=verbose_open, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + shuffle=shuffle, + include_paths=include_paths, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def read_binary_files( + paths: Union[str, List[str]], + *, + include_paths: bool = False, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + parallelism: int = -1, + ray_remote_args: Dict[str, Any] = None, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + meta_provider: Optional[BaseFileMetadataProvider] = None, + partition_filter: Optional[PathPartitionFilter] = None, + partitioning: Partitioning = None, + ignore_missing_paths: bool = False, + shuffle: Union[Literal["files"], None] = None, + file_extensions: Optional[List[str]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from binary files of arbitrary contents. + + Examples: + Read a file in remote storage. + + >>> import ray + >>> path = "s3://anonymous@ray-example-data/pdf-sample_0.pdf" + >>> ds = ray.data.read_binary_files(path) + >>> ds.schema() + Column Type + ------ ---- + bytes binary + + Read multiple local files. + + >>> ray.data.read_binary_files( # doctest: +SKIP + ... 
["local:///path/to/file1", "local:///path/to/file2"]) + + Read a file with the filepaths included as a column in the dataset. + + >>> path = "s3://anonymous@ray-example-data/pdf-sample_0.pdf" + >>> ds = ray.data.read_binary_files(path, include_paths=True) + >>> ds.take(1)[0]["path"] + 'ray-example-data/pdf-sample_0.pdf' + + + Args: + paths: A single file or directory, or a list of file or directory paths. + A list of paths can contain both files and directories. + include_paths: If ``True``, include the path to each file. File paths are + stored in the ``'path'`` column. + filesystem: The PyArrow filesystem + implementation to read from. These filesystems are specified in the + `PyArrow docs `_. Specify this parameter if + you need to provide specific configurations to the filesystem. By default, + the filesystem is automatically selected based on the scheme of the paths. + For example, if the path begins with ``s3://``, the `S3FileSystem` is used. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + arrow_open_stream_args: kwargs passed to + `pyarrow.fs.FileSystem.open_input_file `_. + meta_provider: [Deprecated] A :ref:`file metadata provider `. + Custom metadata providers may be able to resolve file metadata more quickly + and/or accurately. In most cases, you do not need to set this. If ``None``, + this function uses a system-chosen implementation. + partition_filter: A + :class:`~ray.data.datasource.partitioning.PathPartitionFilter`. + Use with a custom callback to read only selected partitions of a + dataset. By default, no files are filtered. + By default, this does not filter out any files. + partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object + that describes how paths are organized. Defaults to ``None``. + ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not + found. Defaults to False. 
+ shuffle: If setting to "files", randomly shuffle input files order before read. + Defaults to not shuffle with ``None``. + file_extensions: A list of file extensions to filter files by. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` producing rows read from the specified paths. + """ + _emit_meta_provider_deprecation_warning(meta_provider) + + if meta_provider is None: + meta_provider = DefaultFileMetadataProvider() + + datasource = BinaryDatasource( + paths, + include_paths=include_paths, + filesystem=filesystem, + open_stream_args=arrow_open_stream_args, + meta_provider=meta_provider, + partition_filter=partition_filter, + partitioning=partitioning, + ignore_missing_paths=ignore_missing_paths, + shuffle=shuffle, + file_extensions=file_extensions, + ) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="alpha") +def read_sql( + sql: str, + connection_factory: Callable[[], Connection], + *, + parallelism: int = -1, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Read from a database that provides a + `Python DB API2-compliant `_ connector. + + .. 
note:: + + By default, ``read_sql`` launches multiple read tasks, and each task executes a + ``LIMIT`` and ``OFFSET`` to fetch a subset of the rows. However, for many + databases, ``OFFSET`` is slow. + + As a workaround, set ``override_num_blocks=1`` to directly fetch all rows in a + single task. Note that this approach requires all result rows to fit in the + memory of single task. If the rows don't fit, your program may raise an out of + memory error. + + Examples: + + For examples of reading from larger databases like MySQL and PostgreSQL, see + :ref:`Reading from SQL Databases `. + + .. testcode:: + + import sqlite3 + + import ray + + # Create a simple database + connection = sqlite3.connect("example.db") + connection.execute("CREATE TABLE movie(title, year, score)") + connection.execute( + \"\"\" + INSERT INTO movie VALUES + ('Monty Python and the Holy Grail', 1975, 8.2), + ("Monty Python Live at the Hollywood Bowl", 1982, 7.9), + ("Monty Python's Life of Brian", 1979, 8.0), + ("Rocky II", 1979, 7.3) + \"\"\" + ) + connection.commit() + connection.close() + + def create_connection(): + return sqlite3.connect("example.db") + + # Get all movies + ds = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + ds = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + ds = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection + ) + + .. testcode:: + :hide: + + import os + os.remove("example.db") + + Args: + sql: The SQL query to execute. + connection_factory: A function that takes no arguments and returns a + Python DB API2 + `Connection object `_. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. 
Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`Dataset` containing the queried data. + """ + datasource = SQLDatasource(sql=sql, connection_factory=connection_factory) + return read_datasource( + datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="alpha") +def read_databricks_tables( + *, + warehouse_id: str, + table: Optional[str] = None, + query: Optional[str] = None, + catalog: Optional[str] = None, + schema: Optional[str] = None, + parallelism: int = -1, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Read a Databricks unity catalog table or Databricks SQL execution result. + + Before calling this API, set the ``DATABRICKS_TOKEN`` environment + variable to your Databricks warehouse access token. + + .. code-block:: console + + export DATABRICKS_TOKEN=... + + If you're not running your program on the Databricks runtime, also set the + ``DATABRICKS_HOST`` environment variable. + + .. code-block:: console + + export DATABRICKS_HOST=adb-..azuredatabricks.net + + .. note:: + + This function is built on the + `Databricks statement execution API `_. + + Examples: + + .. 
testcode:: + :skipif: True + + import ray + + ds = ray.data.read_databricks_tables( + warehouse_id='...', + catalog='catalog_1', + schema='db_1', + query='select id from table_1 limit 750000', + ) + + Args: + warehouse_id: The ID of the Databricks warehouse. The query statement is + executed on this warehouse. + table: The name of UC table you want to read. If this argument is set, + you can't set ``query`` argument, and the reader generates query + of ``select * from {table_name}`` under the hood. + query: The query you want to execute. If this argument is set, + you can't set ``table_name`` argument. + catalog: (Optional) The default catalog name used by the query. + schema: (Optional) The default schema used by the query. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`Dataset` containing the queried data. 
+ """ # noqa: E501 + from ray.data._internal.datasource.databricks_uc_datasource import ( + DatabricksUCDatasource, + ) + from ray.util.spark.utils import get_spark_session, is_in_databricks_runtime + + def get_dbutils(): + no_dbutils_error = RuntimeError("No dbutils module found.") + try: + import IPython + + ip_shell = IPython.get_ipython() + if ip_shell is None: + raise no_dbutils_error + return ip_shell.ns_table["user_global"]["dbutils"] + except ImportError: + raise no_dbutils_error + except KeyError: + raise no_dbutils_error + + token = os.environ.get("DATABRICKS_TOKEN") + + if not token: + raise ValueError( + "Please set environment variable 'DATABRICKS_TOKEN' to " + "databricks workspace access token." + ) + + host = os.environ.get("DATABRICKS_HOST") + if not host: + if is_in_databricks_runtime(): + ctx = ( + get_dbutils().notebook.entry_point.getDbutils().notebook().getContext() + ) + host = ctx.tags().get("browserHostName").get() + else: + raise ValueError( + "You are not in databricks runtime, please set environment variable " + "'DATABRICKS_HOST' to databricks workspace URL" + '(e.g. "adb-..azuredatabricks.net").' 
+ ) + + if not catalog: + catalog = get_spark_session().sql("SELECT CURRENT_CATALOG()").collect()[0][0] + + if not schema: + schema = get_spark_session().sql("SELECT CURRENT_DATABASE()").collect()[0][0] + + if query is not None and table is not None: + raise ValueError("Only one of 'query' and 'table' arguments can be set.") + + if table: + query = f"select * from {table}" + + if query is None: + raise ValueError("One of 'query' and 'table' arguments should be set.") + + datasource = DatabricksUCDatasource( + host=host, + token=token, + warehouse_id=warehouse_id, + catalog=catalog, + schema=schema, + query=query, + ) + return read_datasource( + datasource=datasource, + parallelism=parallelism, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI(stability="alpha") +def read_hudi( + table_uri: str, + *, + storage_options: Optional[Dict[str, str]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """ + Create a :class:`~ray.data.Dataset` from an + `Apache Hudi table `_. + + Examples: + >>> import ray + >>> ds = ray.data.read_hudi( # doctest: +SKIP + ... table_uri="/hudi/trips", + ... ) + + Args: + table_uri: The URI of the Hudi table to read from. Local file paths, S3, and GCS + are supported. + storage_options: Extra options that make sense for a particular storage + connection. This is used to store connection parameters like credentials, + endpoint, etc. See more explanation + `here `_. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. 
+ override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` producing records read from the Hudi table. + """ # noqa: E501 + datasource = HudiDatasource( + table_uri=table_uri, + storage_options=storage_options, + ) + + return read_datasource( + datasource=datasource, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def from_dask(df: "dask.dataframe.DataFrame") -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a + `Dask DataFrame `_. + + Args: + df: A `Dask DataFrame`_. + + Returns: + A :class:`~ray.data.MaterializedDataset` holding rows read from the DataFrame. + """ # noqa: E501 + import dask + + from ray.util.dask import ray_dask_get + + partitions = df.to_delayed() + persisted_partitions = dask.persist(*partitions, scheduler=ray_dask_get) + + import pandas + + def to_ref(df): + if isinstance(df, pandas.DataFrame): + return ray.put(df) + elif isinstance(df, ray.ObjectRef): + return df + else: + raise ValueError( + "Expected a Ray object ref or a Pandas DataFrame, " f"got {type(df)}" + ) + + ds = from_pandas_refs( + [to_ref(next(iter(part.dask.values()))) for part in persisted_partitions], + ) + return ds + + +@PublicAPI +def from_mars(df: "mars.dataframe.DataFrame") -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a + `Mars DataFrame `_. + + Args: + df: A `Mars DataFrame`_, which must be executed by Mars-on-Ray. + + Returns: + A :class:`~ray.data.MaterializedDataset` holding rows read from the DataFrame. 
+ """ # noqa: E501 + import mars.dataframe as md + + ds: Dataset = md.to_ray_dataset(df) + return ds + + +@PublicAPI +def from_modin(df: "modin.pandas.dataframe.DataFrame") -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a + `Modin DataFrame `_. + + Args: + df: A `Modin DataFrame`_, which must be using the Ray backend. + + Returns: + A :class:`~ray.data.MaterializedDataset` rows read from the DataFrame. + """ # noqa: E501 + from modin.distributed.dataframe.pandas.partitions import unwrap_partitions + + parts = unwrap_partitions(df, axis=0) + ds = from_pandas_refs(parts) + return ds + + +@PublicAPI +def from_pandas( + dfs: Union["pandas.DataFrame", List["pandas.DataFrame"]], + override_num_blocks: Optional[int] = None, +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a list of pandas dataframes. + + Examples: + >>> import pandas as pd + >>> import ray + >>> df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + >>> ray.data.from_pandas(df) + MaterializedDataset(num_blocks=1, num_rows=3, schema={a: int64, b: int64}) + + Create a Ray Dataset from a list of Pandas DataFrames. + + >>> ray.data.from_pandas([df, df]) + MaterializedDataset(num_blocks=2, num_rows=6, schema={a: int64, b: int64}) + + Args: + dfs: A pandas dataframe or a list of pandas dataframes. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + :class:`~ray.data.Dataset` holding data read from the dataframes. + """ + import pandas as pd + + if isinstance(dfs, pd.DataFrame): + dfs = [dfs] + + if override_num_blocks is not None: + if len(dfs) > 1: + # I assume most users pass a single DataFrame as input. For simplicity, I'm + # concatenating DataFrames, even though it's not efficient. 
+ ary = pd.concat(dfs, axis=0) + else: + ary = dfs[0] + dfs = np.array_split(ary, override_num_blocks) + + from ray.air.util.data_batch_conversion import ( + _cast_ndarray_columns_to_tensor_extension, + ) + + context = DataContext.get_current() + if context.enable_tensor_extension_casting: + dfs = [_cast_ndarray_columns_to_tensor_extension(df.copy()) for df in dfs] + + return from_pandas_refs([ray.put(df) for df in dfs]) + + +@DeveloperAPI +def from_pandas_refs( + dfs: Union[ObjectRef["pandas.DataFrame"], List[ObjectRef["pandas.DataFrame"]]], +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a list of Ray object references to + pandas dataframes. + + Examples: + >>> import pandas as pd + >>> import ray + >>> df_ref = ray.put(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})) + >>> ray.data.from_pandas_refs(df_ref) + MaterializedDataset(num_blocks=1, num_rows=3, schema={a: int64, b: int64}) + + Create a Ray Dataset from a list of Pandas Dataframes references. + + >>> ray.data.from_pandas_refs([df_ref, df_ref]) + MaterializedDataset(num_blocks=2, num_rows=6, schema={a: int64, b: int64}) + + Args: + dfs: A Ray object reference to a pandas dataframe, or a list of + Ray object references to pandas dataframes. + + Returns: + :class:`~ray.data.Dataset` holding data read from the dataframes. 
+ """ + if isinstance(dfs, ray.ObjectRef): + dfs = [dfs] + elif isinstance(dfs, list): + for df in dfs: + if not isinstance(df, ray.ObjectRef): + raise ValueError( + "Expected list of Ray object refs, " + f"got list containing {type(df)}" + ) + else: + raise ValueError( + "Expected Ray object ref or list of Ray object refs, " f"got {type(df)}" + ) + + context = DataContext.get_current() + if context.enable_pandas_block: + get_metadata = cached_remote_fn(get_table_block_metadata) + metadata = ray.get([get_metadata.remote(df) for df in dfs]) + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromPandas": metadata}, parent=None) + ) + logical_plan = LogicalPlan(FromPandas(dfs, metadata), execution_plan._context) + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + df_to_block = cached_remote_fn(pandas_df_to_arrow_block, num_returns=2) + + res = [df_to_block.remote(df) for df in dfs] + blocks, metadata = map(list, zip(*res)) + metadata = ray.get(metadata) + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromPandas": metadata}, parent=None) + ) + logical_plan = LogicalPlan(FromPandas(blocks, metadata), execution_plan._context) + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + +@PublicAPI +def from_numpy(ndarrays: Union[np.ndarray, List[np.ndarray]]) -> MaterializedDataset: + """Creates a :class:`~ray.data.Dataset` from a list of NumPy ndarrays. + + Examples: + >>> import numpy as np + >>> import ray + >>> arr = np.array([1]) + >>> ray.data.from_numpy(arr) + MaterializedDataset(num_blocks=1, num_rows=1, schema={data: int64}) + + Create a Ray Dataset from a list of NumPy arrays. + + >>> ray.data.from_numpy([arr, arr]) + MaterializedDataset(num_blocks=2, num_rows=2, schema={data: int64}) + + Args: + ndarrays: A NumPy ndarray or a list of NumPy ndarrays. + + Returns: + :class:`~ray.data.Dataset` holding data from the given ndarrays. 
+ """ + if isinstance(ndarrays, np.ndarray): + ndarrays = [ndarrays] + + return from_numpy_refs([ray.put(ndarray) for ndarray in ndarrays]) + + +@DeveloperAPI +def from_numpy_refs( + ndarrays: Union[ObjectRef[np.ndarray], List[ObjectRef[np.ndarray]]], +) -> MaterializedDataset: + """Creates a :class:`~ray.data.Dataset` from a list of Ray object references to + NumPy ndarrays. + + Examples: + >>> import numpy as np + >>> import ray + >>> arr_ref = ray.put(np.array([1])) + >>> ray.data.from_numpy_refs(arr_ref) + MaterializedDataset(num_blocks=1, num_rows=1, schema={data: int64}) + + Create a Ray Dataset from a list of NumPy array references. + + >>> ray.data.from_numpy_refs([arr_ref, arr_ref]) + MaterializedDataset(num_blocks=2, num_rows=2, schema={data: int64}) + + Args: + ndarrays: A Ray object reference to a NumPy ndarray or a list of Ray object + references to NumPy ndarrays. + + Returns: + :class:`~ray.data.Dataset` holding data from the given ndarrays. + """ + if isinstance(ndarrays, ray.ObjectRef): + ndarrays = [ndarrays] + elif isinstance(ndarrays, list): + for ndarray in ndarrays: + if not isinstance(ndarray, ray.ObjectRef): + raise ValueError( + "Expected list of Ray object refs, " + f"got list containing {type(ndarray)}" + ) + else: + raise ValueError( + f"Expected Ray object ref or list of Ray object refs, got {type(ndarray)}" + ) + + ctx = DataContext.get_current() + ndarray_to_block_remote = cached_remote_fn(ndarray_to_block, num_returns=2) + + res = [ndarray_to_block_remote.remote(ndarray, ctx) for ndarray in ndarrays] + blocks, metadata = map(list, zip(*res)) + metadata = ray.get(metadata) + + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromNumpy": metadata}, parent=None) + ) + logical_plan = LogicalPlan(FromNumpy(blocks, metadata), execution_plan._context) + + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + +@PublicAPI +def from_arrow( + tables: Union["pyarrow.Table", bytes, List[Union["pyarrow.Table", bytes]]], +) 
-> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a list of PyArrow tables. + + Examples: + >>> import pyarrow as pa + >>> import ray + >>> table = pa.table({"x": [1]}) + >>> ray.data.from_arrow(table) + MaterializedDataset(num_blocks=1, num_rows=1, schema={x: int64}) + + Create a Ray Dataset from a list of PyArrow tables. + + >>> ray.data.from_arrow([table, table]) + MaterializedDataset(num_blocks=2, num_rows=2, schema={x: int64}) + + + Args: + tables: A PyArrow table, or a list of PyArrow tables, + or its streaming format in bytes. + + Returns: + :class:`~ray.data.Dataset` holding data from the PyArrow tables. + """ + import pyarrow as pa + + if isinstance(tables, (pa.Table, bytes)): + tables = [tables] + return from_arrow_refs([ray.put(t) for t in tables]) + + +@DeveloperAPI +def from_arrow_refs( + tables: Union[ + ObjectRef[Union["pyarrow.Table", bytes]], + List[ObjectRef[Union["pyarrow.Table", bytes]]], + ], +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a list of Ray object references to + PyArrow tables. + + Examples: + >>> import pyarrow as pa + >>> import ray + >>> table_ref = ray.put(pa.table({"x": [1]})) + >>> ray.data.from_arrow_refs(table_ref) + MaterializedDataset(num_blocks=1, num_rows=1, schema={x: int64}) + + Create a Ray Dataset from a list of PyArrow table references + + >>> ray.data.from_arrow_refs([table_ref, table_ref]) + MaterializedDataset(num_blocks=2, num_rows=2, schema={x: int64}) + + + Args: + tables: A Ray object reference to Arrow table, or list of Ray object + references to Arrow tables, or its streaming format in bytes. + + Returns: + :class:`~ray.data.Dataset` holding data read from the tables. 
+ """ + if isinstance(tables, ray.ObjectRef): + tables = [tables] + + get_metadata = cached_remote_fn(get_table_block_metadata) + metadata = ray.get([get_metadata.remote(t) for t in tables]) + execution_plan = ExecutionPlan( + DatasetStats(metadata={"FromArrow": metadata}, parent=None) + ) + logical_plan = LogicalPlan(FromArrow(tables, metadata), execution_plan._context) + + return MaterializedDataset( + execution_plan, + logical_plan, + ) + + +@PublicAPI(stability="alpha") +def read_delta_sharing_tables( + url: str, + *, + limit: Optional[int] = None, + version: Optional[int] = None, + timestamp: Optional[str] = None, + json_predicate_hints: Optional[str] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """ + Read data from a Delta Sharing table. + Delta Sharing projct https://github.com/delta-io/delta-sharing/tree/main + + This function reads data from a Delta Sharing table specified by the URL. + It supports various options such as limiting the number of rows, specifying + a version or timestamp, and configuring concurrency. + + Before calling this function, ensure that the URL is correctly formatted + to point to the Delta Sharing table you want to access. Make sure you have + a valid delta_share profile in the working directory. + + Examples: + + .. testcode:: + :skipif: True + + import ray + + ds = ray.data.read_delta_sharing_tables( + url=f"your-profile.json#your-share-name.your-schema-name.your-table-name", + limit=100000, + version=1, + ) + + Args: + url: A URL under the format + "#..". + Example can be found at + https://github.com/delta-io/delta-sharing/blob/main/README.md#quick-start + limit: A non-negative integer. Load only the ``limit`` rows if the + parameter is specified. Use this optional parameter to explore the + shared table without loading the entire table into memory. + version: A non-negative integer. 
Load the snapshot of the table at + the specified version. + timestamp: A timestamp to specify the version of the table to read. + json_predicate_hints: Predicate hints to be applied to the table. For more + details, see: + https://github.com/delta-io/delta-sharing/blob/main/PROTOCOL.md#json-predicates-for-filtering. + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control the number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`Dataset` containing the queried data. + + Raises: + ValueError: If the URL is not properly formatted or if there is an issue + with the Delta Sharing table connection. + """ + + datasource = DeltaSharingDatasource( + url=url, + json_predicate_hints=json_predicate_hints, + limit=limit, + version=version, + timestamp=timestamp, + ) + # DeltaSharing limit is at the add_files level, it will not return + # exactly the limit number of rows but it will return less files and rows. + return ray.data.read_datasource( + datasource=datasource, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +@PublicAPI +def from_spark( + df: "pyspark.sql.DataFrame", + *, + parallelism: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a + `Spark DataFrame `_. + + Args: + df: A `Spark DataFrame`_, which must be created by RayDP (Spark-on-Ray). 
+ parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.MaterializedDataset` holding rows read from the DataFrame. + """ # noqa: E501 + import raydp + + parallelism = _get_num_output_blocks(parallelism, override_num_blocks) + return raydp.spark.spark_dataframe_to_ray_dataset(df, parallelism) + + +@PublicAPI +def from_huggingface( + dataset: Union["datasets.Dataset", "datasets.IterableDataset"], + parallelism: int = -1, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Union[MaterializedDataset, Dataset]: + """Create a :class:`~ray.data.MaterializedDataset` from a + `Hugging Face Datasets Dataset `_ + or a :class:`~ray.data.Dataset` from a `Hugging Face Datasets IterableDataset `_. + For an `IterableDataset`, we use a streaming implementation to read data. + + If the dataset is a public Hugging Face Dataset that is hosted on the Hugging Face Hub and + no transformations have been applied, then the `hosted parquet files `_ + will be passed to :meth:`~ray.data.read_parquet` to perform a distributed read. All + other cases will be done with a single node read. + + Example: + + .. + The following `testoutput` is mocked to avoid illustrating download + logs like "Downloading and preparing dataset 162.17 MiB". + + .. testcode:: + + import ray + import datasets + + hf_dataset = datasets.load_dataset("tweet_eval", "emotion") + ray_ds = ray.data.from_huggingface(hf_dataset["train"]) + print(ray_ds) + + hf_dataset_stream = datasets.load_dataset("tweet_eval", "emotion", streaming=True) + ray_ds_stream = ray.data.from_huggingface(hf_dataset_stream["train"]) + print(ray_ds_stream) + + .. 
testoutput:: + :options: +MOCK + + MaterializedDataset( + num_blocks=..., + num_rows=3257, + schema={text: string, label: int64} + ) + Dataset( + num_rows=3257, + schema={text: string, label: int64} + ) + + Args: + dataset: A `Hugging Face Datasets Dataset`_ or `Hugging Face Datasets IterableDataset`_. + `DatasetDict `_ + and `IterableDatasetDict `_ + are not supported. + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` holding rows from the `Hugging Face Datasets Dataset`_. + """ # noqa: E501 + import datasets + from aiohttp.client_exceptions import ClientResponseError + + from ray.data._internal.datasource.huggingface_datasource import ( + HuggingFaceDatasource, + ) + + if isinstance(dataset, (datasets.IterableDataset, datasets.Dataset)): + try: + # Attempt to read data via Hugging Face Hub parquet files. If the + # returned list of files is empty, attempt read via other methods. + file_urls = HuggingFaceDatasource.list_parquet_urls_from_dataset(dataset) + if len(file_urls) > 0: + # If file urls are returned, the parquet files are available via API + # TODO: Add support for reading from http filesystem in + # FileBasedDatasource. 
GH Issue: + # https://github.com/ray-project/ray/issues/42706 + import fsspec.implementations.http + + http = fsspec.implementations.http.HTTPFileSystem() + return read_parquet( + file_urls, + parallelism=parallelism, + filesystem=http, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ray_remote_args={ + "retry_exceptions": [FileNotFoundError, ClientResponseError] + }, + ) + except (FileNotFoundError, ClientResponseError): + logger.warning( + "Distrubuted read via Hugging Face Hub parquet files failed, " + "falling back on single node read." + ) + + if isinstance(dataset, datasets.IterableDataset): + # For an IterableDataset, we can use a streaming implementation to read data. + return read_datasource( + HuggingFaceDatasource(dataset=dataset), + parallelism=parallelism, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + if isinstance(dataset, datasets.Dataset): + # For non-streaming Hugging Face Dataset, we don't support override_num_blocks + if override_num_blocks is not None: + raise ValueError( + "`override_num_blocks` parameter is not supported for " + "streaming Hugging Face Datasets. Please omit the parameter or " + "use non-streaming mode to read the dataset." + ) + + # To get the resulting Arrow table from a Hugging Face Dataset after + # applying transformations (e.g., train_test_split(), shard(), select()), + # we create a copy of the Arrow table, which applies the indices + # mapping from the transformations. + hf_ds_arrow = dataset.with_format("arrow") + ray_ds = from_arrow(hf_ds_arrow[:]) + return ray_ds + elif isinstance(dataset, (datasets.DatasetDict, datasets.IterableDatasetDict)): + available_keys = list(dataset.keys()) + raise DeprecationWarning( + "You provided a Hugging Face DatasetDict or IterableDatasetDict, " + "which contains multiple datasets, but `from_huggingface` now " + "only accepts a single Hugging Face Dataset. 
To convert just " + "a single Hugging Face Dataset to a Ray Dataset, specify a split. " + "For example, `ray.data.from_huggingface(my_dataset_dictionary" + f"['{available_keys[0]}'])`. " + f"Available splits are {available_keys}." + ) + else: + raise TypeError( + f"`dataset` must be a `datasets.Dataset`, but got {type(dataset)}" + ) + + +@PublicAPI +def from_tf( + dataset: "tf.data.Dataset", +) -> MaterializedDataset: + """Create a :class:`~ray.data.Dataset` from a + `TensorFlow Dataset `_. + + This function is inefficient. Use it to read small datasets or prototype. + + .. warning:: + If your dataset is large, this function may execute slowly or raise an + out-of-memory error. To avoid issues, read the underyling data with a function + like :meth:`~ray.data.read_images`. + + .. note:: + This function isn't parallelized. It loads the entire dataset into the local + node's memory before moving the data to the distributed object store. + + Examples: + >>> import ray + >>> import tensorflow_datasets as tfds + >>> dataset, _ = tfds.load('cifar10', split=["train", "test"]) # doctest: +SKIP + >>> ds = ray.data.from_tf(dataset) # doctest: +SKIP + >>> ds # doctest: +SKIP + MaterializedDataset( + num_blocks=..., + num_rows=50000, + schema={ + id: binary, + image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), + label: int64 + } + ) + >>> ds.take(1) # doctest: +SKIP + [{'id': b'train_16399', 'image': array([[[143, 96, 70], + [141, 96, 72], + [135, 93, 72], + ..., + [ 96, 37, 19], + [105, 42, 18], + [104, 38, 20]], + ..., + [[195, 161, 126], + [187, 153, 123], + [186, 151, 128], + ..., + [212, 177, 147], + [219, 185, 155], + [221, 187, 157]]], dtype=uint8), 'label': 7}] + + Args: + dataset: A `TensorFlow Dataset`_. + + Returns: + A :class:`MaterializedDataset` that contains the samples stored in the `TensorFlow Dataset`_. + """ # noqa: E501 + # FIXME: `as_numpy_iterator` errors if `dataset` contains ragged tensors. 
+ return from_items(list(dataset.as_numpy_iterator())) + + +@PublicAPI +def from_torch( + dataset: "torch.utils.data.Dataset", + local_read: bool = False, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from a + `Torch Dataset `_. + + .. note:: + The input dataset can either be map-style or iterable-style, and can have arbitrarily large amount of data. + The data will be sequentially streamed with one single read task. + + Examples: + >>> import ray + >>> from torchvision import datasets + >>> dataset = datasets.MNIST("data", download=True) # doctest: +SKIP + >>> ds = ray.data.from_torch(dataset) # doctest: +SKIP + >>> ds # doctest: +SKIP + MaterializedDataset(num_blocks=..., num_rows=60000, schema={item: object}) + >>> ds.take(1) # doctest: +SKIP + {"item": (, 5)} + + Args: + dataset: A `Torch Dataset`_. + local_read: If ``True``, perform the read as a local read. + + Returns: + A :class:`~ray.data.Dataset` containing the Torch dataset samples. + """ # noqa: E501 + + # Files may not be accessible from all nodes, run the read task on current node. + ray_remote_args = {} + if local_read: + ray_remote_args = { + "scheduling_strategy": NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ), + # The user might have initialized Ray to have num_cpus = 0 for the head + # node. For a local read we expect the read task to be executed on the + # head node, so we should set num_cpus = 0 for the task to allow it to + # run regardless of the user's head node configuration. + "num_cpus": 0, + } + return read_datasource( + TorchDatasource(dataset=dataset), + ray_remote_args=ray_remote_args, + # Only non-parallel, streaming read is currently supported + override_num_blocks=1, + ) + + +@PublicAPI +def read_iceberg( + *, + table_identifier: str, + row_filter: Union[str, "BooleanExpression"] = None, + parallelism: int = -1, + selected_fields: Tuple[str, ...] 
= ("*",), + snapshot_id: Optional[int] = None, + scan_kwargs: Optional[Dict[str, str]] = None, + catalog_kwargs: Optional[Dict[str, str]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """Create a :class:`~ray.data.Dataset` from an Iceberg table. + + The table to read from is specified using a fully qualified ``table_identifier``. + Using PyIceberg, any intended row filters, selection of specific fields and + picking of a particular snapshot ID are applied, and the files that satisfy + the query are distributed across Ray read tasks. + The number of output blocks is determined by ``override_num_blocks`` + which can be requested from this interface or automatically chosen if + unspecified. + + .. tip:: + + For more details on PyIceberg, see + - URI: https://py.iceberg.apache.org/ + + Examples: + >>> import ray + >>> from pyiceberg.expressions import EqualTo #doctest: +SKIP + >>> ds = ray.data.read_iceberg( #doctest: +SKIP + ... table_identifier="db_name.table_name", + ... row_filter=EqualTo("column_name", "literal_value"), + ... catalog_kwargs={"name": "default", "type": "glue"} + ... ) + + Args: + table_identifier: Fully qualified table identifier (``db_name.table_name``) + row_filter: A PyIceberg :class:`~pyiceberg.expressions.BooleanExpression` + to use to filter the data *prior* to reading + parallelism: This argument is deprecated. Use ``override_num_blocks`` argument. + selected_fields: Which columns from the data to read, passed directly to + PyIceberg's load functions. Should be an tuple of string column names. + snapshot_id: Optional snapshot ID for the Iceberg table, by default the latest + snapshot is used + scan_kwargs: Optional arguments to pass to PyIceberg's Table.scan() function + (e.g., case_sensitive, limit, etc.) + catalog_kwargs: Optional arguments to pass to PyIceberg's catalog.load_catalog() + function (e.g., name, type, etc.). 
For the function definition, see + `pyiceberg catalog + `_. + ray_remote_args: Optional arguments to pass to `ray.remote` in the read tasks + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources, and capped at the number of + physical files to be read. You shouldn't manually set this value in most + cases. + + Returns: + :class:`~ray.data.Dataset` with rows from the Iceberg table. + """ + + # Setup the Datasource + datasource = IcebergDatasource( + table_identifier=table_identifier, + row_filter=row_filter, + selected_fields=selected_fields, + snapshot_id=snapshot_id, + scan_kwargs=scan_kwargs, + catalog_kwargs=catalog_kwargs, + ) + + dataset = read_datasource( + datasource=datasource, + parallelism=parallelism, + override_num_blocks=override_num_blocks, + ray_remote_args=ray_remote_args, + ) + + return dataset + + +@PublicAPI +def read_lance( + uri: str, + *, + columns: Optional[List[str]] = None, + filter: Optional[str] = None, + storage_options: Optional[Dict[str, str]] = None, + scanner_options: Optional[Dict[str, Any]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + override_num_blocks: Optional[int] = None, +) -> Dataset: + """ + Create a :class:`~ray.data.Dataset` from a + `Lance Dataset `_. + + Examples: + >>> import ray + >>> ds = ray.data.read_lance( # doctest: +SKIP + ... uri="./db_name.lance", + ... columns=["image", "label"], + ... filter="label = 2 AND text IS NOT NULL", + ... ) + + Args: + uri: The URI of the Lance dataset to read from. Local file paths, S3, and GCS + are supported. + columns: The columns to read. By default, all columns are read. + filter: Read returns only the rows matching the filter. By default, no + filter is applied. + storage_options: Extra options that make sense for a particular storage + connection. 
This is used to store connection parameters like credentials, + endpoint, etc. For more information, see `Object Store Configuration `_. + scanner_options: Additional options to configure the `LanceDataset.scanner()` + method, such as `batch_size`. For more information, + see `LanceDB API doc `_ + ray_remote_args: kwargs passed to :meth:`~ray.remote` in the read tasks. + concurrency: The maximum number of Ray tasks to run concurrently. Set this + to control number of tasks to run concurrently. This doesn't change the + total number of tasks run or the total number of output blocks. By default, + concurrency is dynamically decided based on the available resources. + override_num_blocks: Override the number of output blocks from all read tasks. + By default, the number of output blocks is dynamically decided based on + input data size and available resources. You shouldn't manually set this + value in most cases. + + Returns: + A :class:`~ray.data.Dataset` producing records read from the Lance dataset. + """ # noqa: E501 + datasource = LanceDatasource( + uri=uri, + columns=columns, + filter=filter, + storage_options=storage_options, + scanner_options=scanner_options, + ) + + return read_datasource( + datasource=datasource, + ray_remote_args=ray_remote_args, + concurrency=concurrency, + override_num_blocks=override_num_blocks, + ) + + +def _get_datasource_or_legacy_reader( + ds: Datasource, + ctx: DataContext, + kwargs: dict, +) -> Union[Datasource, Reader]: + """Generates reader. + + Args: + ds: Datasource to read from. + ctx: Dataset config to use. + kwargs: Additional kwargs to pass to the legacy reader if + `Datasource.create_reader` is implemented. + + Returns: + The datasource or a generated legacy reader. + """ + kwargs = _unwrap_arrow_serialization_workaround(kwargs) + + DataContext._set_current(ctx) + + if ds.should_create_reader: + warnings.warn( + "`create_reader` has been deprecated in Ray 2.9. 
Instead of creating a " + "`Reader`, implement `Datasource.get_read_tasks` and " + "`Datasource.estimate_inmemory_data_size`.", + DeprecationWarning, + ) + datasource_or_legacy_reader = ds.create_reader(**kwargs) + else: + datasource_or_legacy_reader = ds + + return datasource_or_legacy_reader + + +def _resolve_parquet_args( + tensor_column_schema: Optional[Dict[str, Tuple[np.dtype, Tuple[int, ...]]]] = None, + **arrow_parquet_args, +) -> Dict[str, Any]: + if tensor_column_schema is not None: + existing_block_udf = arrow_parquet_args.pop("_block_udf", None) + + def _block_udf(block: "pyarrow.Table") -> "pyarrow.Table": + from ray.data.extensions import ArrowTensorArray + + for tensor_col_name, (dtype, shape) in tensor_column_schema.items(): + # NOTE(Clark): We use NumPy to consolidate these potentially + # non-contiguous buffers, and to do buffer bookkeeping in + # general. + np_col = _create_possibly_ragged_ndarray( + [ + np.ndarray(shape, buffer=buf.as_buffer(), dtype=dtype) + for buf in block.column(tensor_col_name) + ] + ) + + block = block.set_column( + block._ensure_integer_index(tensor_col_name), + tensor_col_name, + ArrowTensorArray.from_numpy(np_col, tensor_col_name), + ) + if existing_block_udf is not None: + # Apply UDF after casting the tensor columns. + block = existing_block_udf(block) + return block + + arrow_parquet_args["_block_udf"] = _block_udf + return arrow_parquet_args + + +def _get_num_output_blocks( + parallelism: int = -1, + override_num_blocks: Optional[int] = None, +) -> int: + if parallelism != -1: + logger.warning( + "The argument ``parallelism`` is deprecated in Ray 2.10. Please specify " + "argument ``override_num_blocks`` instead." + ) + elif override_num_blocks is not None: + parallelism = override_num_blocks + return parallelism + + +def _validate_shuffle_arg(shuffle: Optional[str]) -> None: + if shuffle not in [None, "files"]: + raise ValueError( + f"Invalid value for 'shuffle': {shuffle}. " + "Valid values are None, 'files'." 
+ ) + + +def _emit_meta_provider_deprecation_warning( + meta_provider: Optional[BaseFileMetadataProvider], +) -> None: + if meta_provider is not None: + warnings.warn( + "The `meta_provider` argument is deprecated and will be removed after May " + "2025.", + DeprecationWarning, + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2479501640ba3f2b0be30c3548ee465a13a481a9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__init__.py @@ -0,0 +1,4 @@ +from ray.widgets.render import Template +from ray.widgets.util import make_table_html_repr + +__all__ = ["Template", "make_table_html_repr"] diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca4ab8c15e8081aaf63f4fc8ada9a9f30b04a458 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/render.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/render.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4206c90cd0e16e4b9373bfa9035f948f321f7b0a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/render.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/util.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17a2470410779236211b9ab18bbc66d8ac587968 Binary files /dev/null and 
b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/__pycache__/util.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/render.py b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/render.py new file mode 100644 index 0000000000000000000000000000000000000000..f9e861d39925680c403ff996e9279d4d349bafe5 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/render.py @@ -0,0 +1,39 @@ +import pathlib +from typing import List + +from ray.util.annotations import DeveloperAPI + + +@DeveloperAPI +class Template: + """Class which provides basic HTML templating.""" + + def __init__(self, file: str): + with open(pathlib.Path(__file__).parent / "templates" / file, "r") as f: + self.template = f.read() + + def render(self, **kwargs) -> str: + """Render an HTML template with the given data. + + This is done by replacing instances of `{{ key }}` with `value` + from the keyword arguments. + + Returns: + HTML template with the keys of the kwargs replaced with corresponding + values. + """ + rendered = self.template + for key, value in kwargs.items(): + if isinstance(value, List): + value = "".join(value) + rendered = rendered.replace("{{ " + key + " }}", value if value else "") + return rendered + + @staticmethod + def list_templates() -> List[pathlib.Path]: + """List the available HTML templates. 
+ + Returns: + A list of files with .html.j2 extensions inside ../templates/ + """ + return (pathlib.Path(__file__).parent / "templates").glob("*.html.j2") diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..26cc0ef6c8784c9859151f8553b240d7a94e9360 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context.html.j2 @@ -0,0 +1,6 @@ + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_dashrow.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_dashrow.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..47fbc2fa6f6da7e8a44c88c0e07ae51c6a737dbd --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_dashrow.html.j2 @@ -0,0 +1,4 @@ + + Dashboard: + {{ dashboard_url }} + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_logo.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_logo.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..9233fe3a77226c760442d5366b8d5fb65b93abcd --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_logo.html.j2 @@ -0,0 +1,13 @@ + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_table.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_table.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..d06822d0c1f56534e5009ea2ff81340fdc6f7850 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/context_table.html.j2 @@ -0,0 +1,11 @@ + + + + + + + + + + {{ dashboard_row }} + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/divider.html.j2 
b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/divider.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..b9a04173d7e0918c54a1d950da25938a215be3a2 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/divider.html.j2 @@ -0,0 +1,9 @@ +
+ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/rendered_html_common.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/rendered_html_common.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..35b4cee0133a3b4105e84ca7a66a2ffeb01f4c5b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/rendered_html_common.html.j2 @@ -0,0 +1,3 @@ + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/run_config.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/run_config.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..4cf392dff519cdd5d7c0629ca3394f47ef705825 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/run_config.html.j2 @@ -0,0 +1,18 @@ +
+
+ {{ settings }} +
+
+ {{ subconfigs }} +
+
+ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/scrollableTable.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/scrollableTable.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..2ec1637b92ee7de3d22bba0a956b8539464c0501 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/scrollableTable.html.j2 @@ -0,0 +1,20 @@ + + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..1731a157d17b4674e3d7cc427164065dafe49de2 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data.html.j2 @@ -0,0 +1,11 @@ + + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data_mini.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data_mini.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..bfe654c56346b09a1999dda379d129aaf1e317be --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/title_data_mini.html.j2 @@ -0,0 +1,4 @@ + diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/trial_progress.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/trial_progress.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..f3a323193e7fa242f9cc37a75d22d02841b74da6 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/trial_progress.html.j2 @@ -0,0 +1,17 @@ +
+

Trial Progress

+ {{ table }} +
+ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..df422f89af4e7d03bc70006414f12652f5898824 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status.html.j2 @@ -0,0 +1,49 @@ +
+
+
+

Tune Status

+ {{ status_table }} +
+
+
+

System Info

+ {{ sys_info_message }} +
+ {{ messages }} +
+
+
+

Trial Status

+ {{ trial_progress }} +
+
+ diff --git a/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status_messages.html.j2 b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status_messages.html.j2 new file mode 100644 index 0000000000000000000000000000000000000000..da8e75f5f58d3a5c78274429e7490947aff4c410 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/ray/widgets/templates/tune_status_messages.html.j2 @@ -0,0 +1,25 @@ +
+
+

Messages

+ {{ memory_message }} + {{ trial_progress_messages }} + {{ trial_errors }} +
import importlib
import logging
import sys
import textwrap
from functools import wraps
from typing import Any, Callable, Iterable, Optional, TypeVar, Union

from packaging.version import Version

from ray._private.thirdparty.tabulate.tabulate import tabulate
from ray.util.annotations import DeveloperAPI
from ray.widgets import Template

logger = logging.getLogger(__name__)

F = TypeVar("F", bound=Callable[..., Any])


@DeveloperAPI
def make_table_html_repr(
    obj: Any, title: Optional[str] = None, max_height: str = "none"
) -> str:
    """Generate a generic HTML repr for *obj* using a table.

    Scalar attributes (str/bool/int/float) are rendered directly; dict
    attributes and objects exposing ``__dict__`` are rendered as nested
    scrollable sub-tables.

    Args:
        obj: Object for which a repr is to be generated.
        title: If present, a title for the section is included.
        max_height: Maximum height of the table; valid values are given by
            the max-height CSS property.

    Returns:
        HTML representation of the object.
    """
    rows = {}
    for attr, value in vars(obj).items():
        if isinstance(value, (str, bool, int, float)):
            rows[attr] = str(value)
        elif isinstance(value, dict) or hasattr(value, "__dict__"):
            # Nested mappings/objects become their own scrollable sub-table.
            items = value.items() if isinstance(value, dict) else vars(value).items()
            rows[attr] = Template("scrollableTable.html.j2").render(
                table=tabulate(
                    items,
                    tablefmt="html",
                    showindex=False,
                    headers=["Setting", "Value"],
                ),
                max_height="none",
            )

    table = Template("scrollableTable.html.j2").render(
        table=tabulate(
            rows.items(),
            tablefmt="unsafehtml",
            showindex=False,
            headers=["Setting", "Value"],
        ),
        max_height=max_height,
    )

    if title:
        return Template("title_data.html.j2").render(title=title, data=table)
    return table


def _has_missing(
    *deps: Iterable[Union[str, Optional[str]]], message: Optional[str] = None
):
    """Return the dependencies from *deps* that cannot be found.

    Args:
        deps: ``(library, version)`` pairs to check for.
        message: Message to be emitted if a dependency isn't found.

    Returns:
        A list of library names which can't be found, if any.
    """
    missing = [lib for lib, _ in deps if importlib.util.find_spec(lib) is None]

    if missing:
        if not message:
            message = f"Run `pip install {' '.join(missing)}` for rich notebook output."

        # stacklevel=3: first level is this helper, then the decorator
        # machinery, then the actual function affected.
        logger.info(f"Missing packages: {missing}. {message}", stacklevel=3)

    return missing


def _has_outdated(
    *deps: Iterable[Union[str, Optional[str]]], message: Optional[str] = None
):
    """Return the dependencies from *deps* installed below their pinned version.

    Each result entry is ``[library, required_version, installed_version]``.
    Libraries that fail to import are skipped here (``_has_missing`` reports
    those).
    """
    outdated = []
    for lib, version in deps:
        try:
            module = importlib.import_module(lib)
        except ImportError:
            continue
        if version and Version(module.__version__) < Version(version):
            outdated.append([lib, version, module.__version__])

    if outdated:
        needs = []
        install_args = []
        for lib, version, installed in outdated:
            needs.append(f"{lib}=={installed} found, needs {lib}>={version}")
            install_args.append(f"{lib}>={version}")

        outdated_str = textwrap.indent("\n".join(needs), " ")
        install_str = " ".join(install_args)

        if not message:
            message = f"Run `pip install -U {install_str}` for rich notebook output."

        # stacklevel=3: first level is this helper, then the decorator
        # machinery, then the actual function affected.
        logger.info(f"Outdated packages:\n{outdated_str}\n{message}", stacklevel=3)

    return outdated


@DeveloperAPI
def repr_with_fallback(
    *notebook_deps: Iterable[Union[str, Optional[str]]]
) -> Callable[[F], F]:
    """Decorator which strips rich notebook output from mimebundles in certain cases.

    Fallback to plaintext and don't use rich output in the following cases:
    1. In a notebook environment when the appropriate dependencies are not
       installed.
    2. In an ipython shell environment.
    3. In a Google Colab environment. See
       https://github.com/googlecolab/colabtools/issues/60 for more
       information about the status of this issue.

    NOTE(review): the environment/dependency check runs once, when the
    decorator is applied (typically at import time), not per repr call —
    confirm this is intended if the environment can change at runtime.

    Args:
        notebook_deps: The required dependencies and version for notebook
            environment.

    Returns:
        A function that returns the usual _repr_mimebundle_, unless any of
        the 3 conditions above hold, in which case it returns a mimebundle
        that only contains a single text/plain mimetype.
    """
    message = (
        "Run `pip install -U ipywidgets`, then restart "
        "the notebook server for rich notebook output."
    )
    # Evaluated once at decoration time, mirroring the original's branch
    # selection between a pass-through and a plaintext wrapper.
    rich_output_ok = _can_display_ipywidgets(*notebook_deps, message=message)

    def wrapper(func: F) -> F:
        @wraps(func)
        def wrapped(self, *args, **kwargs):
            if rich_output_ok:
                return func(self, *args, **kwargs)
            return {"text/plain": repr(self)}

        return wrapped

    return wrapper


def _get_ipython_shell_name() -> str:
    # Return the class name of the active IPython shell, or "" when IPython
    # has not been imported at all (so we never import it as a side effect).
    if "IPython" not in sys.modules:
        return ""

    from IPython import get_ipython

    return get_ipython().__class__.__name__


def _can_display_ipywidgets(*deps, message) -> bool:
    # Default to safe behavior: only display widgets if running in a notebook
    # that has valid dependencies.
    if not in_notebook():
        return False
    if _has_missing(*deps, message=message):
        return False
    # Only reached when nothing is missing, preserving the original
    # short-circuit order of the two checks.
    return not _has_outdated(*deps, message=message)


@DeveloperAPI
def in_notebook(shell_name: Optional[str] = None) -> bool:
    """Return whether we are in a Jupyter notebook or qtconsole."""
    return (shell_name or _get_ipython_shell_name()) == "ZMQInteractiveShell"


@DeveloperAPI
def in_ipython_shell(shell_name: Optional[str] = None) -> bool:
    """Return whether we are in a terminal running IPython"""
    return (shell_name or _get_ipython_shell_name()) == "TerminalInteractiveShell"