diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__init__.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b912d19ef8f0e54409434cb78557ba570cae4c7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__init__.py @@ -0,0 +1 @@ +"""OpenGL Extensions""" \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__pycache__/recordable.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__pycache__/recordable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc3c08972ddc9fd125a143f1991969f699c9ff72 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/__pycache__/recordable.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/framebuffer_target.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/framebuffer_target.py new file mode 100644 index 0000000000000000000000000000000000000000..aa3f5d350285cc71c9b57953df0714e24e392ada --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/framebuffer_target.py @@ -0,0 +1,23 @@ +'''OpenGL extension ANDROID.framebuffer_target + +This module customises the behaviour of the +OpenGL.raw.EGL.ANDROID.framebuffer_target to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/ANDROID/framebuffer_target.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.ANDROID.framebuffer_target import * +from OpenGL.raw.EGL.ANDROID.framebuffer_target import _EXTENSION_NAME + +def glInitFramebufferTargetANDROID(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( 
_EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/image_native_buffer.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/image_native_buffer.py new file mode 100644 index 0000000000000000000000000000000000000000..d7849d661d6cad8bed873399141cc54597de03ed --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/image_native_buffer.py @@ -0,0 +1,23 @@ +'''OpenGL extension ANDROID.image_native_buffer + +This module customises the behaviour of the +OpenGL.raw.EGL.ANDROID.image_native_buffer to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/ANDROID/image_native_buffer.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.ANDROID.image_native_buffer import * +from OpenGL.raw.EGL.ANDROID.image_native_buffer import _EXTENSION_NAME + +def glInitImageNativeBufferANDROID(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/native_fence_sync.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/native_fence_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..37cbad3bc836042719d858e810d13402adcec8c3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/native_fence_sync.py @@ -0,0 +1,23 @@ +'''OpenGL extension ANDROID.native_fence_sync + +This module customises the behaviour of the +OpenGL.raw.EGL.ANDROID.native_fence_sync to provide a more +Python-friendly API + +The official definition of this extension is available here: 
+http://www.opengl.org/registry/specs/ANDROID/native_fence_sync.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.ANDROID.native_fence_sync import * +from OpenGL.raw.EGL.ANDROID.native_fence_sync import _EXTENSION_NAME + +def glInitNativeFenceSyncANDROID(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/recordable.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/recordable.py new file mode 100644 index 0000000000000000000000000000000000000000..87ec54ccbdd6f8cffb0c11e6c66981c557eb7400 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/ANDROID/recordable.py @@ -0,0 +1,23 @@ +'''OpenGL extension ANDROID.recordable + +This module customises the behaviour of the +OpenGL.raw.EGL.ANDROID.recordable to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/ANDROID/recordable.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.ANDROID.recordable import * +from OpenGL.raw.EGL.ANDROID.recordable import _EXTENSION_NAME + +def glInitRecordableANDROID(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/buffer_age.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/buffer_age.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..699b2938476af693ee02cf770aefbbe6a662253e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/buffer_age.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/client_extensions.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/client_extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4e40733fc0c389bced8bb8147b1fff99f573647 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/client_extensions.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/create_context_robustness.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/create_context_robustness.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e59b5d5b00b822283e408b8f6909740ae59851e8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/create_context_robustness.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/multiview_window.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/multiview_window.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c881ff03d0b742e2fec77b9418c580eed7cc9520 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/multiview_window.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/protected_surface.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/protected_surface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15d7f748f5fc39313eb2e1a4962805ce24972ff2 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/protected_surface.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/swap_buffers_with_damage.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/swap_buffers_with_damage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7663743044834bb83fb1ab33862ce26ed727b70e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/__pycache__/swap_buffers_with_damage.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/buffer_age.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/buffer_age.py new file mode 100644 index 0000000000000000000000000000000000000000..b36b266fcbae204dd36b51e8f00199c0b17b67bc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/buffer_age.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.buffer_age + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.buffer_age to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/buffer_age.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.buffer_age import * +from OpenGL.raw.EGL.EXT.buffer_age import _EXTENSION_NAME + +def glInitBufferAgeEXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/client_extensions.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/client_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc830d952b7104cb7bfe1081c4189f806ccc23f --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/client_extensions.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.client_extensions + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.client_extensions to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/client_extensions.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.client_extensions import * +from OpenGL.raw.EGL.EXT.client_extensions import _EXTENSION_NAME + +def glInitClientExtensionsEXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/create_context_robustness.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/create_context_robustness.py new file mode 100644 index 0000000000000000000000000000000000000000..6f678c08fd72ef4b323cdb8c56c46d384bc4a470 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/create_context_robustness.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.create_context_robustness + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.create_context_robustness to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/create_context_robustness.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.create_context_robustness import * +from OpenGL.raw.EGL.EXT.create_context_robustness import _EXTENSION_NAME + +def glInitCreateContextRobustnessEXT(): + '''Return boolean indicating whether this 
extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/image_dma_buf_import.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/image_dma_buf_import.py new file mode 100644 index 0000000000000000000000000000000000000000..3fbea711733b7fa0499696163277eaa9427787b6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/image_dma_buf_import.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.image_dma_buf_import + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.image_dma_buf_import to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/image_dma_buf_import.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.image_dma_buf_import import * +from OpenGL.raw.EGL.EXT.image_dma_buf_import import _EXTENSION_NAME + +def glInitImageDmaBufImportEXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/multiview_window.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/multiview_window.py new file mode 100644 index 0000000000000000000000000000000000000000..36f3e3d7b1e1d3079366c63e986d53f4fa80ba97 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/multiview_window.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.multiview_window + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.multiview_window to provide a more +Python-friendly API + +The official definition of this extension is available here: 
+http://www.opengl.org/registry/specs/EXT/multiview_window.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.multiview_window import * +from OpenGL.raw.EGL.EXT.multiview_window import _EXTENSION_NAME + +def glInitMultiviewWindowEXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/platform_x11.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/platform_x11.py new file mode 100644 index 0000000000000000000000000000000000000000..d5764df858315c5c26c0f760ea9914ca7a647de8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/platform_x11.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.platform_x11 + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.platform_x11 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/platform_x11.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.platform_x11 import * +from OpenGL.raw.EGL.EXT.platform_x11 import _EXTENSION_NAME + +def glInitPlatformX11EXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/swap_buffers_with_damage.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/swap_buffers_with_damage.py new file mode 100644 index 
0000000000000000000000000000000000000000..544a68606d7da0a5741eeaf92f46eac7ad63045d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/EXT/swap_buffers_with_damage.py @@ -0,0 +1,23 @@ +'''OpenGL extension EXT.swap_buffers_with_damage + +This module customises the behaviour of the +OpenGL.raw.EGL.EXT.swap_buffers_with_damage to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/EXT/swap_buffers_with_damage.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.EXT.swap_buffers_with_damage import * +from OpenGL.raw.EGL.EXT.swap_buffers_with_damage import _EXTENSION_NAME + +def glInitSwapBuffersWithDamageEXT(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_0.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_0.py new file mode 100644 index 0000000000000000000000000000000000000000..3a399b25c3f02b6fd800e7ce2b2a0942b351299e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_0.py @@ -0,0 +1,23 @@ +'''OpenGL extension VERSION.EGL_1_0 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_0 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_0.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_0 import * +from OpenGL.raw.EGL.VERSION.EGL_1_0 import _EXTENSION_NAME + +def glInitEgl10VERSION(): + '''Return boolean 
indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_1.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_1.py new file mode 100644 index 0000000000000000000000000000000000000000..78d474a2925595241ec9cb84036ef24194bb94c5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_1.py @@ -0,0 +1,23 @@ +'''OpenGL extension VERSION.EGL_1_1 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_1 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_1.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_1 import * +from OpenGL.raw.EGL.VERSION.EGL_1_1 import _EXTENSION_NAME + +def glInitEgl11VERSION(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_2.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_2.py new file mode 100644 index 0000000000000000000000000000000000000000..bd32614485878fe2e9e2c512818ac1206fc07881 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_2.py @@ -0,0 +1,23 @@ +'''OpenGL extension VERSION.EGL_1_2 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_2 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_2.txt +''' +from OpenGL import 
platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_2 import * +from OpenGL.raw.EGL.VERSION.EGL_1_2 import _EXTENSION_NAME + +def glInitEgl12VERSION(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_3.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_3.py new file mode 100644 index 0000000000000000000000000000000000000000..67db5f4a06612d3cbb4d486c25d0f03a9f9c365a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_3.py @@ -0,0 +1,23 @@ +'''OpenGL extension VERSION.EGL_1_3 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_3 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_3.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_3 import * +from OpenGL.raw.EGL.VERSION.EGL_1_3 import _EXTENSION_NAME + +def glInitEgl13VERSION(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_4.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_4.py new file mode 100644 index 0000000000000000000000000000000000000000..c349ec45469e9819655a12378d387a67f2468f6b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_4.py @@ -0,0 +1,23 @@ +'''OpenGL extension 
VERSION.EGL_1_4 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_4 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_4.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_4 import * +from OpenGL.raw.EGL.VERSION.EGL_1_4 import _EXTENSION_NAME + +def glInitEgl14VERSION(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_5.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_5.py new file mode 100644 index 0000000000000000000000000000000000000000..de06cd42b741d36ac465c2f3e5dd4420cf6c1357 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/EGL_1_5.py @@ -0,0 +1,23 @@ +'''OpenGL extension VERSION.EGL_1_5 + +This module customises the behaviour of the +OpenGL.raw.EGL.VERSION.EGL_1_5 to provide a more +Python-friendly API + +The official definition of this extension is available here: +http://www.opengl.org/registry/specs/VERSION/EGL_1_5.txt +''' +from OpenGL import platform, constant, arrays +from OpenGL import extensions, wrapper +import ctypes +from OpenGL.raw.EGL import _types, _glgets +from OpenGL.raw.EGL.VERSION.EGL_1_5 import * +from OpenGL.raw.EGL.VERSION.EGL_1_5 import _EXTENSION_NAME + +def glInitEgl15VERSION(): + '''Return boolean indicating whether this extension is available''' + from OpenGL import extensions + return extensions.hasGLExtension( _EXTENSION_NAME ) + + +### END AUTOGENERATED SECTION \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__init__.py 
b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b912d19ef8f0e54409434cb78557ba570cae4c7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__init__.py @@ -0,0 +1 @@ +"""OpenGL Extensions""" \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_0.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_0.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f89f913b704859cf59c2516d1be0d086012df5fa Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_0.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_1.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa08bca50dbdae720d9b2ac61c509efb1bb18a2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_1.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f192fed9b2281af560135a7d12de89ab3ccb33d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_3.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b57338e10108a76b7e6419a88178b707d7a0c693 Binary files /dev/null and 
b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_3.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_4.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..273c4dd274784e6d082c0a297af6c8e2ef1c3bd8 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_4.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_5.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d36372c3de34784079c05eea99d091a123d4650 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/EGL_1_5.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98a3cd221f475bf113226bab6d2f8d5357063fa3 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/OpenGL/EGL/VERSION/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/OpenGL/EGL/__init__.py b/vllm/lib/python3.10/site-packages/OpenGL/EGL/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5673e6ce05a867467bddcedb5ad6b3f1aeeb71bb --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/EGL/__init__.py @@ -0,0 +1,8 @@ +"""OpenGL.EGL the portable interface to GL environments""" +from OpenGL.raw.EGL._types import * +from OpenGL.EGL.VERSION.EGL_1_0 import * +from OpenGL.EGL.VERSION.EGL_1_1 import * +from OpenGL.EGL.VERSION.EGL_1_2 import * +from OpenGL.EGL.VERSION.EGL_1_3 import * +from 
OpenGL.EGL.VERSION.EGL_1_4 import * +from OpenGL.EGL.VERSION.EGL_1_5 import * diff --git a/vllm/lib/python3.10/site-packages/OpenGL/__init__.py b/vllm/lib/python3.10/site-packages/OpenGL/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5ae18a2dabfd15cb69cdd0ab989ee1f02e1cb7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/__init__.py @@ -0,0 +1,277 @@ +"""ctypes-based OpenGL wrapper for Python + +This is the PyOpenGL 3.x tree, it attempts to provide +a largely compatible API for code written with the +PyOpenGL 2.x series using the ctypes foreign function +interface system. + +Configuration Variables: + +There are a few configuration variables in this top-level +module. Applications should be the only code that tweaks +these variables, mid-level libraries should not take it +upon themselves to disable/enable features at this level. +The implication there is that your library code should be +able to work with any of the valid configurations available +with these sets of flags. + +Further, once any entry point has been loaded, the variables +can no longer be updated. The OpenGL._confligflags module +imports the variables from this location, and once that +import occurs the flags should no longer be changed. + + ERROR_CHECKING -- if set to a False value before + importing any OpenGL.* libraries will completely + disable error-checking. This can dramatically + improve performance, but makes debugging far + harder. + + This is intended to be turned off *only* in a + production environment where you *know* that + your code is entirely free of situations where you + use exception-handling to handle error conditions, + i.e. where you are explicitly checking for errors + everywhere they can occur in your code. 
+ + Default: True + + ERROR_LOGGING -- If True, then wrap array-handler + functions with error-logging operations so that all exceptions + will be reported to log objects in OpenGL.logs, note that + this means you will get lots of error logging whenever you + have code that tests by trying something and catching an + error, this is intended to be turned on only during + development so that you can see why something is failing. + + Errors are normally logged to the OpenGL.errors logger. + + Only triggers if ERROR_CHECKING is True + + Default: False + + ERROR_ON_COPY -- if set to a True value before + importing the numpy/lists support modules, will + cause array operations to raise + OpenGL.error.CopyError if the operation + would cause a data-copy in order to make the + passed data-type match the target data-type. + + This effectively disables all list/tuple array + support, as they are inherently copy-based. + + This feature allows for optimisation of your + application. It should only be enabled during + testing stages to prevent raising errors on + recoverable conditions at run-time. + + Default: False + + CONTEXT_CHECKING -- if set to True, PyOpenGL will wrap + *every* GL and GLU call with a check to see if there + is a valid context. If there is no valid context + then will throw OpenGL.errors.NoContext. This is an + *extremely* slow check and is not enabled by default, + intended to be enabled in order to track down (wrong) + code that uses GL/GLU entry points before the context + has been initialized (something later Linux GLs are + very picky about). + + Default: False + + STORE_POINTERS -- if set to True, PyOpenGL array operations + will attempt to store references to pointers which are + being passed in order to prevent memory-access failures + if the pointed-to-object goes out of scope. This + behaviour is primarily intended to allow temporary arrays + to be created without causing memory errors, thus it is + trading off performance for safety. 
+ + To use this flag effectively, you will want to first set + ERROR_ON_COPY to True and eliminate all cases where you + are copying arrays. Copied arrays *will* segfault your + application deep within the GL if you disable this feature! + + Once you have eliminated all copying of arrays in your + application, you will further need to be sure that all + arrays which are passed to the GL are stored for at least + the time period for which they are active in the GL. That + is, you must be sure that your array objects live at least + until they are no longer bound in the GL. This is something + you need to confirm by thinking about your application's + structure. + + When you are sure your arrays won't cause seg-faults, you + can set STORE_POINTERS=False in your application and enjoy + a (slight) speed up. + + Note: this flag is *only* observed when ERROR_ON_COPY == True, + as a safety measure to prevent pointless segfaults + + Default: True + + WARN_ON_FORMAT_UNAVAILABLE -- If True, generates + logging-module warn-level events when a FormatHandler + plugin is not loadable (with traceback). + + Default: False + + FULL_LOGGING -- If True, then wrap functions with + logging operations which reports each call along with its + arguments to the OpenGL.calltrace logger at the INFO + level. This is *extremely* slow. You should *not* enable + this in production code! + + You will need to have a logging configuration (e.g. + logging.basicConfig() + ) call in your top-level script to see the results of the + logging. + + Default: False + + ALLOW_NUMPY_SCALARS -- if True, we will wrap + all GLint/GLfloat calls conversions with wrappers + that allow for passing numpy scalar values. + + Note that this is experimental, *not* reliable, + and very slow! + + Note that byte/char types are not wrapped. 
+ + Default: False + + UNSIGNED_BYTE_IMAGES_AS_STRING -- if True, we will return + GL_UNSIGNED_BYTE image-data as strings, instead of arrays + for glReadPixels and glGetTexImage + + Default: True + + FORWARD_COMPATIBLE_ONLY -- only include OpenGL 3.1 compatible + entry points. Note that this will generally break most + PyOpenGL code that hasn't been explicitly made "legacy free" + via a significant rewrite. + + Default: False + + SIZE_1_ARRAY_UNPACK -- if True, unpack size-1 arrays to be + scalar values, as done in PyOpenGL 1.5 -> 3.0.0, that is, + if a glGenList( 1 ) is done, return a uint rather than + an array of uints. + + Default: True + + USE_ACCELERATE -- if True, attempt to use the OpenGL_accelerate + package to provide Cython-coded accelerators for core wrapping + operations. + + Default: True + + MODULE_ANNOTATIONS -- if True, attempt to annotate alternates() and + constants to track in which module they are defined (only useful + for the documentation-generation passes, really). 
def environ_key( name, default ):
    """Look up the boolean configuration flag PYOPENGL_<NAME> in the environment

    name -- flag name; upper-cased and prefixed with ``PYOPENGL_`` to form
        the environment-variable name
    default -- value returned when the variable is not set at all

    returns True when the variable is set to '1' or 'true' (case-insensitive),
    False when it is set to any other value, and *default* when unset.
    """
    composed = 'PYOPENGL_%s' % name.upper()
    try:
        value = os.environ[composed]
    except KeyError:
        # Previous revision fell through to os.environ.get(composed, default)
        # here, which could only ever return *default* -- the key is known
        # to be absent at this point.
        return default
    return value.lower() in ('1', 'true')
'OpenGL.arrays.strings.UnicodeHandler',[_bi+'.unicode'], isOutput=False ) +else: + FormatHandler( 'bytes', 'OpenGL.arrays.strings.StringHandler',[_bi+'.bytes'], isOutput=False ) + FormatHandler( 'str', 'OpenGL.arrays.strings.UnicodeHandler',[_bi+'.str'], isOutput=False ) + +FormatHandler( 'list', 'OpenGL.arrays.lists.ListHandler', [ + _bi+'.list', + _bi+'.tuple', +], isOutput=False ) +FormatHandler( 'numbers', 'OpenGL.arrays.numbers.NumberHandler', [ + _bi+'.int', + _bi+'.float', + _bi+'.long', +], isOutput=False ) +FormatHandler( + 'ctypesarrays', 'OpenGL.arrays.ctypesarrays.CtypesArrayHandler', + [ + '_ctypes.ArrayType', + '_ctypes.PyCArrayType', + '_ctypes.Array', + '_ctypes.array.Array', + ], + isOutput=True, +) +FormatHandler( + 'ctypesparameter', + 'OpenGL.arrays.ctypesparameters.CtypesParameterHandler', + [ + _bi+'.CArgObject', + 'ctypes.c_uint', + 'ctypes.c_int', + 'ctypes.c_float', + 'ctypes.c_double', + 'ctypes.c_ulong', + 'ctypes.c_long', + 'ctypes.c_longlong', + ], + isOutput=True, +) +FormatHandler( 'ctypespointer', 'OpenGL.arrays.ctypespointers.CtypesPointerHandler',[ + 'ctypes.c_void_p', + '_ctypes._Pointer', + 'ctypes.c_char_p', + '_ctypes.pointer._Pointer', +],isOutput=False ) +FormatHandler( 'numpy', 'OpenGL.arrays.numpymodule.NumpyHandler', [ + 'numpy.ndarray', + 'numpy.core.memmap.memmap', +],isOutput=True ) +FormatHandler( 'buffer', 'OpenGL.arrays.buffers.BufferHandler', [ + 'OpenGL.arrays._buffers.Py_buffer', + _bi+'.memoryview', + _bi+'.bytearray', +],isOutput=True ) +FormatHandler( 'vbo', 'OpenGL.arrays.vbo.VBOHandler', ['OpenGL.arrays.vbo.VBO','OpenGL_accelerate.vbo.VBO'],isOutput=False ) +FormatHandler( 'vbooffset', 'OpenGL.arrays.vbo.VBOOffsetHandler', ['OpenGL.arrays.vbo.VBOOffset','OpenGL_accelerate.vbo.VBOOffset'],isOutput=False ) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/_bytes.py b/vllm/lib/python3.10/site-packages/OpenGL/_bytes.py new file mode 100644 index 
0000000000000000000000000000000000000000..4617a8cbd95fe2fef434ec460e9524368d6f287a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/_bytes.py @@ -0,0 +1,63 @@ +"""8-bit string definitions for Python 2/3 compatibility + +Defines the following which allow for dealing with Python 3 breakages: + + STR_IS_BYTES + STR_IS_UNICODE + + Easily checked booleans for type identities + + _NULL_8_BYTE + + An 8-bit byte with NULL (0) value + + as_8_bit( x, encoding='utf-8') + + Returns the value as the 8-bit version + + unicode -- always pointing to the unicode type + bytes -- always pointing to the 8-bit bytes type +""" +import sys + +STR_IS_BYTES = True + +if sys.version_info[:2] < (2,6): + # no bytes, traditional setup... + bytes = str +else: + bytes = bytes +try: + long = long +except NameError as err: + long = int +if sys.version_info[:2] < (3,0): + # traditional setup, with bytes defined... + unicode = unicode + _NULL_8_BYTE = '\000' + def as_8_bit( x, encoding='utf-8' ): + if isinstance( x, unicode ): + return x.encode( encoding ) + return bytes( x ) + integer_types = int,long +else: + # new setup, str is now unicode... 
+ STR_IS_BYTES = False + _NULL_8_BYTE = bytes( '\000','latin1' ) + def as_8_bit( x, encoding='utf-8' ): + if isinstance( x,unicode ): + return x.encode(encoding) + elif isinstance( x, bytes ): + # Note: this can create an 8-bit string that is *not* in encoding, + # but that is potentially exactly what we wanted, as these can + # be arbitrary byte-streams being passed to C functions + return x + return str(x).encode( encoding ) + unicode = str + integer_types = int, + +STR_IS_UNICODE = not STR_IS_BYTES +if hasattr( sys, 'maxsize' ): + maxsize = sys.maxsize +else: + maxsize = sys.maxint diff --git a/vllm/lib/python3.10/site-packages/OpenGL/_configflags.py b/vllm/lib/python3.10/site-packages/OpenGL/_configflags.py new file mode 100644 index 0000000000000000000000000000000000000000..dff961b28e9dbbb933a33fa99d70062a736b00ca --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/_configflags.py @@ -0,0 +1,18 @@ +"""Holds the import-time constants for various configuration flags""" +from OpenGL import ( + ERROR_CHECKING, + ERROR_LOGGING, + ERROR_ON_COPY, + ARRAY_SIZE_CHECKING, + STORE_POINTERS, + WARN_ON_FORMAT_UNAVAILABLE, + FORWARD_COMPATIBLE_ONLY, + SIZE_1_ARRAY_UNPACK, + USE_ACCELERATE, + CONTEXT_CHECKING, + + FULL_LOGGING, + ALLOW_NUMPY_SCALARS, + UNSIGNED_BYTE_IMAGES_AS_STRING, + MODULE_ANNOTATIONS, +) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/_null.py b/vllm/lib/python3.10/site-packages/OpenGL/_null.py new file mode 100644 index 0000000000000000000000000000000000000000..c21981393cce58944bdbf3970582b1bd416374a2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/_null.py @@ -0,0 +1,2 @@ +"""Just a NULL object for reference by other modules""" +NULL = object() diff --git a/vllm/lib/python3.10/site-packages/OpenGL/_opaque.py b/vllm/lib/python3.10/site-packages/OpenGL/_opaque.py new file mode 100644 index 0000000000000000000000000000000000000000..42f4adca6043070aff1f9e2cde5b9464deb8aa42 --- /dev/null +++ 
"""Data-type definitions for EGL/GLES"""
import ctypes

pointer = ctypes.pointer

class _Opaque( ctypes.Structure ):
    """Opaque (field-less) structure used as a pointed-to placeholder type"""

class _opaque_pointer( ctypes.POINTER( _Opaque ) ):
    """Pointer-to-opaque base class with convenience accessors"""
    _type_ = _Opaque
    @classmethod
    def from_param( cls, value ):
        """Coerce an arbitrary pointer-like *value* to this pointer type"""
        return ctypes.cast( value, cls )
    @property
    def address( self ):
        """Integer address of the pointed-to structure"""
        return ctypes.addressof( self.contents )
    @property
    def as_voidp( self ):
        """The address wrapped as a ctypes void pointer"""
        # c_void_p is the canonical spelling of the c_voidp alias
        return ctypes.c_void_p( self.address )

def opaque_pointer_cls( name ):
    """Create an Opaque pointer class for the given name

    name -- base name for the generated structure type; the returned
        pointer class is named ``<name>_pointer``
    """
    target = type( name, (_Opaque,), {} )
    return type( name + '_pointer', (_opaque_pointer,), {'_type_': target} )
b/vllm/lib/python3.10/site-packages/OpenGL/constant.py new file mode 100644 index 0000000000000000000000000000000000000000..ae64ba7d362cff65fc71d59b8d1336b9eee8ff49 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/constant.py @@ -0,0 +1,78 @@ +"""Implementation of OpenGL constant objects""" +import sys +from OpenGL._bytes import bytes,unicode,as_8_bit, long, integer_types, maxsize +from OpenGL import _configflags + +class Constant( object ): + """OpenGL constant that displays itself as a name rather than a value + + The purpose of this class is to make debugging OpenGL code easier, + as you recieve messages that say what value you passed in in a + human-readable form, rather than as a bald number that requires + lookup and disambiguation in the header file. + """ + def __new__( cls, name, value=None ): + """Initialise the constant with the given name and value""" + if not isinstance( value, Constant ): + if isinstance( value, float ) and cls is not FloatConstant: + return FloatConstant( name, value ) + elif isinstance( value, int ) and cls is not IntConstant: + return IntConstant( name, value ) + elif isinstance( value, long ) and cls is not LongConstant: + return LongConstant( name, value ) + elif isinstance( value, (bytes,unicode) ) and cls is not StringConstant: + return StringConstant( name, as_8_bit(value) ) + if isinstance( value, integer_types ): + if value > maxsize: # TODO: I'm guessing this should really by sizeof GLint, not + value = - (value & maxsize) + base = super(Constant,cls).__new__( cls, value ) + base.name = name + if _configflags.MODULE_ANNOTATIONS: + frame = sys._getframe().f_back + if frame and frame.f_back and '__name__' in frame.f_back.f_globals: + base.__module__ = frame.f_back.f_globals['__name__'] + return base + def __repr__( self ): + """Return the name, rather than the bald value""" + return self.name + def __getnewargs__( self ): + """Produce the new arguments for recreating the instance""" + return (self.name,) + super( 
Constant, self ).__getnewargs__() + +class NumericConstant( Constant ): + """Base class for numeric-value constants""" + def __str__( self ): + """Return the value as a human-friendly string""" + return '%s (%s)'%(self.name,super(Constant,self).__str__()) + def __getstate__(self): + """Retrieve state for pickle and the like""" + return self.name + def __setstate__( self, state ): + self.name = state + +class IntConstant( NumericConstant, int ): + """Integer constant""" +if int is not long: + class LongConstant( NumericConstant, long ): + """Long integer constant""" +else: + LongConstant = IntConstant +class FloatConstant( NumericConstant, float ): + """Float constant""" + +class StringConstant( Constant, bytes ): + """String constants""" + def __repr__( self ): + """Return the value as a human-friendly string""" + return '%s (%s)'%(self.name,super(Constant,self).__str__()) + +if __name__ == "__main__": + x = IntConstant( 'testint', 3 ) + y = FloatConstant( 'testfloat', 3.0 ) + z = StringConstant( 'teststr', 'some testing string' ) + + import pickle + for val in x,y,z: + restored = pickle.loads( pickle.dumps( val )) + assert restored == val, (str(restored),str(val)) + assert restored.name == val.name, (restored.name,val.name) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/constants.py b/vllm/lib/python3.10/site-packages/OpenGL/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..132a440bb30c3b1065f7f36ed42fb65740d990e0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/constants.py @@ -0,0 +1,3 @@ +"""Backward-compatibility module to provide core-GL constant names""" +from OpenGL.raw.GL._types import * +from OpenGL.arrays._arrayconstants import * diff --git a/vllm/lib/python3.10/site-packages/OpenGL/contextdata.py b/vllm/lib/python3.10/site-packages/OpenGL/contextdata.py new file mode 100644 index 0000000000000000000000000000000000000000..dcf3c94429a7df583a6cc9220d6716e04dd4dfe0 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/OpenGL/contextdata.py @@ -0,0 +1,132 @@ +"""Storage of per-context values of various types + +Because OpenGL needs persistent references to the +objects we're constructing to shadow Python objects, +we have to store references to the objects somewhere + +For any given Python GUI library, we can use a weakref +to the library's representation of the GL context to +call the cleanup function. That means some per-GUI +library code in OpenGL (or the library), but it gives +us very natural operations within OpenGL. + +Note: you can entirely disable use of this module by +setting: + + OpenGL.ERROR_ON_COPY = True + OpenGL.STORE_POINTERS = False + +before importing OpenGL functionality. +""" +from OpenGL import platform +import weakref +storedPointers = { + # map from contextID: { constant: value } +} +storedWeakPointers = { + # map from contextID: WeakValueDictionary({ constant: value }) +} +STORAGES = [ storedPointers, storedWeakPointers ] + +def getContext( context = None ): + """Get the context (if passed, just return) + + context -- the context ID, if None, the current context + """ + if context is None: + context = platform.GetCurrentContext() + if context == 0: + from OpenGL import error + raise error.Error( + """Attempt to retrieve context when no valid context""" + ) + return context +def setValue( constant, value, context=None, weak=False ): + """Set a stored value for the given context + + constant -- Normally a GL constant value, but can be any hashable value + value -- the value to be stored. If weak is true must be + weak-reference-able. If None, then the value will be deleted from + the storage + context -- the context identifier for which we're storing the value + weak -- if true, value will be stored with a weakref + Note: you should always pass the same value for "weak" for a given + constant, otherwise you will create two storages for the constant. 
+ """ + if getattr( value, '_no_cache_', False ): + return + context = getContext( context ) + if weak: + storage = storedWeakPointers + cls = weakref.WeakValueDictionary + else: + storage = storedPointers + cls = dict + current = storage.get( context ) + if current is None: + storage[context] = current = cls() + previous = current.get( constant ) + if value is None: + try: + del current[ constant ] + except (KeyError,TypeError,ValueError) as err: + pass + else: + # XXX potential for failure here if a non-weakref-able objects + # is being stored with weak == True + current[ constant ] = value +## print 'previous', previous, value, constant + return previous +def delValue( constant, context=None ): + """Delete the specified value for the given context + + constant -- Normally a GL constant value, but can be any hashable value + context -- the context identifier for which we're storing the value + """ + context = getContext( context ) + found = False + for storage in STORAGES: + contextStorage = storage.get( context ) + if contextStorage: + try: + del contextStorage[ constant ] + found = True + except KeyError as err: + pass + return found + +def getValue( constant, context = None ): + """Get a stored value for the given constant + + constant -- unique ID for the type of data being retrieved + context -- the context ID, if None, the current context + """ + context = getContext( context ) + for storage in STORAGES: + contextStorage = storage.get( context ) + if contextStorage: + value = contextStorage.get( constant ) + if value is not None: + return value + return None + +def cleanupContext( context=None ): + """Cleanup all held pointer objects for the given context + + Warning: this is dangerous, as if you call it before a context + is destroyed you may release memory held by the context and cause + a protection fault when the GL goes to render the scene! 
+ + Normally you will want to get the context ID explicitly and then + register cleanupContext as a weakref callback to your GUI library + Context object with the (now invalid) context ID as parameter. + """ + if context is None: + context = platform.GetCurrentContext() + for storage in STORAGES: + try: + del storedPointers[ context ] + except KeyError as err: + return False + else: + return True diff --git a/vllm/lib/python3.10/site-packages/OpenGL/converters.py b/vllm/lib/python3.10/site-packages/OpenGL/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..9602c3bb1ec2cfbf9c7148671993734c0dc3e5b5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/converters.py @@ -0,0 +1,316 @@ +"""Implementations for common converter types""" +import ctypes,logging +from OpenGL._bytes import bytes, unicode, as_8_bit +from OpenGL._null import NULL +_log = logging.getLogger( 'OpenGL.converters' ) + +class Converter( object ): + """Base class for Converter types + + Converter objects are callable objects used with the + OpenGL.wrapper.Wrapper class to simplify the wrapping + of functions by collecting commonly used functionality + into a reusable function. + + Each Converter has two (class) attributes: + + argNames -- list of argument names for initialisation + indexLookups -- set of (indexname, argName,methodName) values + to lookup on wrapper. These allow us to use argument-name + references to refer to which arguments to use when + processing (prevents the need to revise pointers when + we change the API for a function). + + Converters can be any of the Wrapper API helper functions, + so the callable interface can vary among Converter classes. 
+ """ + argNames = ( ) + indexLookups = ( ) + def __init__( self, *args, **named ): + """Store arguments in attributes + + *args -- mapped to self.argNames in order to set attributes + **named -- mapped to self.argNames by name to set attributes + """ + argNames = list(self.argNames) + for a in self.argNames: + if a in named: + setattr( self, a, named[a] ) + argNames.remove( a ) + for a,value in zip( argNames, args ): + setattr( self, a, value ) + def finalise( self, wrapper ): + """Look up our indices (where appropriate)""" + for indexname,argName,methodName in self.indexLookups: + setattr( + self, indexname, + getattr(wrapper,methodName)(getattr( self, argName )) + ) + +# Definitions of the abstract interfaces... +class PyConverter( Converter ): + """Converter sub-class for use in Wrapper.pyConverters + + This class just defines the interface for a pyConverter-style + Converter object + """ + def __call__( self, incoming, function, arguments ): + """Convert incoming argument into compatable data-types + + incoming -- the Python argument for this parameter + function -- the wrapper.Wrapper class we are supporting + arguments -- the complete set of arguments passed to the + function + + + """ + raise NotImplemented( """%s class doesn't implement __call__"""%( + self.__class__.__name__, + )) + +class CConverter( Converter ): + """Converter sub-class for use in Wrapper.cConverters + + This class just defines the interface for a cConverter-style + Converter object + """ + def __call__( self, pyArgs, index, baseOperation ): + """Calculate C-compatible Python object from Python arguments + + pyArgs -- set of Python argument objects converted by + pyConverters from the incoming arguments + index -- our index in baseOperation.cConverters + baseOperation -- the Wrapper object which we are supporting + """ + raise NotImplemented( """%s class doesn't implement __call__"""%( + self.__class__.__name__, + )) +class ReturnValues( Converter ): + """Converter sub-class for use as 
Wrapper.returnValues + + This class just defines the interface for a returnValues-style + Converter object + """ + def __call__( self, result, baseOperation, pyArgs, cArgs ): + """Return a final value to the caller + + result -- the raw ctypes result value + baseOperation -- the Wrapper object which we are supporting + pyArgs -- the set of Python arguments produced by pyConverters + cArgs -- the set of C-compatible arguments produced by CConverter + + return the Python object for the final result + """ + raise NotImplemented( """%s class doesn't implement __call__"""%( + self.__class__.__name__, + )) + +# Now the concrete classes... +from OpenGL import acceleratesupport +CallFuncPyConverter = None +if acceleratesupport.ACCELERATE_AVAILABLE: + try: + from OpenGL_accelerate.wrapper import ( + CallFuncPyConverter, DefaultCConverter, getPyArgsName, + ) + from OpenGL_accelerate.arraydatatype import ( + Output,SizedOutput,OutputOrInput,SizedOutputOrInput + ) + from OpenGL_accelerate.wrapper import ( + returnCArgument, returnPyArgument, + ) + except ImportError as err: + _log.warn( + "Unable to load converters accelerators (wrapper, arraydatatype) from OpenGL_accelerate" + ) + CallFuncPyConverter = None +if CallFuncPyConverter is None: + class CallFuncPyConverter( PyConverter ): + """PyConverter that takes a callable and calls it on incoming""" + def __init__( self, function ): + """Store the function""" + self.function = function + def __call__( self, incoming, function, argument ): + """Call our function on incoming""" + return self.function( incoming ) + class DefaultCConverter( CConverter ): + """NULL or Default CConverter, returns same-named Python argument + + Used primarily to allow for specifying a converter that explicitly + says "use the default behaviour". This is *not* a finalise-ing + converter, it is passed in the index explicitly and just retrieves + that index from pyArgs when called. 
+ + Raises informative errors if the index cannot be resolved in pyArgs + """ + def __init__( self, index ): + """Just store index for future access""" + self.index = index + def __call__( self, pyArgs, index, wrapper ): + """Return pyArgs[self.index] or raise a ValueError""" + try: + return pyArgs[ self.index ] + except IndexError as err: + raise ValueError( + """Expected parameter index %r, but pyArgs only length %s"""%( + self.index, + len(pyArgs ) + )) + class getPyArgsName( CConverter ): + """CConverter returning named Python argument + + Intended for use in cConverters, the function returned + retrieves the named pyArg and returns it when called. + """ + argNames = ('name',) + indexLookups = [ ('index','name', 'pyArgIndex' ), ] + __slots__ = ( 'index', 'name') + def __call__( self, pyArgs, index, baseOperation ): + """Return pyArgs[ self.index ]""" + try: + return pyArgs[ self.index ] + except AttributeError as err: + raise RuntimeError( """"Did not resolve parameter index for %r"""%(self.name)) + + class Output( CConverter ): + """CConverter generating static-size typed output arrays + + Produces an output array of given type (arrayType) and + size using self.lookup() to determine the size of the + array to be produced, where the lookup function is passed + as an initialisation argument. + + Provides also: + + oldStyleReturn( ... ) for use in the default case of + PyOpenGL compatability mode, where result arrays of + size (1,) are returned as scalar values. 
+ """ + argNames = ('name','size','arrayType' ) + indexLookups = [ + ('outIndex','name', 'cArgIndex' ), + ] + __slots__ = ('index','size','arrayType','outIndex','inIndex') + def __call__( self, pyArgs, index, baseOperation ): + """Return pyArgs[ self.index ]""" + return self.arrayType.zeros( self.getSize(pyArgs) ) + def getSize( self, pyArgs ): + """Retrieve the array size for this argument""" + return self.size + def oldStyleReturn( self, result, baseOperation, pyArgs, cArgs ): + """Retrieve cArgs[ self.index ]""" + result = cArgs[ self.outIndex ] + try: + thisSize = self.getSize(pyArgs) + except KeyError as err: + return result + if thisSize == (1,): + try: + return result[0] + except TypeError as err: + return result + else: + return result + class OutputOrInput( Output ): + DO_OUTPUT = (None,NULL) + def __call__( self, pyArgs, index, baseOperation ): + for do_output in self.DO_OUTPUT: + if pyArgs[index] is do_output: + return super( OutputOrInput,self ).__call__( pyArgs, index, baseOperation ) + return self.arrayType.asArray( pyArgs[index] ) + + class SizedOutput( Output ): + """Output generating dynamically-sized typed output arrays + + Takes an extra parameter "specifier", which is the name of + a Python argument to be passed to the lookup function in order + to determine the appropriate size for the output array. 
+ """ + argNames = ('name','specifier','lookup','arrayType' ) + indexLookups = [ + ('outIndex','name', 'cArgIndex' ), + ('index','specifier', 'pyArgIndex' ), + ] + __slots__ = ('index','specifier','lookup','arrayType') + def getSize( self, pyArgs ): + """Retrieve the array size for this argument""" + try: + specifier = pyArgs[ self.index ] + except AttributeError as err: + raise RuntimeError( """"Did not resolve parameter index for %r"""%(self.name)) + else: + try: + return self.lookup( specifier ) + except KeyError as err: + raise KeyError( """Unknown specifier %s"""%( specifier )) + class SizedOutputOrInput( SizedOutput ): + DO_OUTPUT = (None,NULL) + def __call__( self, pyArgs, index, baseOperation ): + for do_output in self.DO_OUTPUT: + if pyArgs[index] is do_output: + return super( SizedOutputOrInput,self ).__call__( pyArgs, index, baseOperation ) + return self.arrayType.asArray( pyArgs[index] ) + class returnCArgument( ReturnValues ): + """ReturnValues returning the named cArgs value""" + argNames = ('name',) + indexLookups = [ ('index','name', 'cArgIndex' ), ] + __slots__ = ( 'index', 'name' ) + def __call__( self, result, baseOperation, pyArgs, cArgs ): + """Retrieve cArgs[ self.index ]""" + return cArgs[self.index] + + class returnPyArgument( ReturnValues ): + """ReturnValues returning the named pyArgs value""" + argNames = ('name',) + indexLookups = [ ('index','name', 'pyArgIndex' ), ] + __slots__ = ( 'index', 'name' ) + def __call__( self, result, baseOperation, pyArgs, cArgs ): + """Retrieve pyArgs[ self.index ]""" + return pyArgs[self.index] + +class StringLengths( CConverter ): + """CConverter for processing array-of-pointers-to-strings data-type + + Converter is a CConverter for the array-of-lengths for a + array-of-pointers-to-strings data-type used to pass a set + of code fragments to the GLSL compiler. 
+ + Provides also: + + stringArray -- PyConverter callable ensuring list-of-strings + format for the python argument + + stringArrayForC -- CResolver converting the array to + POINTER(c_char_p) format for passing to C + + totalCount -- CConverter callable giving count of string + pointers (that is, length of the pointer array) + """ + argNames = ('name',) + indexLookups = [ ('index','name', 'pyArgIndex' ), ] + __slots__ = () + def __call__( self, pyArgs, index, baseOperation ): + """Get array of length integers for string contents""" + from OpenGL.raw.GL import _types + tmp = [len(x) for x in pyArgs[self.index]] + a_type = _types.GLint * len(tmp) + return a_type( *tmp ) + def totalCount( self, pyArgs, index, baseOperation ): + """Get array of length integers for string contents""" + return len(pyArgs[self.index]) + def stringArray( self, arg, baseOperation, args ): + """Create basic array-of-strings object from pyArg""" + if isinstance( arg, (bytes,unicode) ): + arg = [arg] + value = [as_8_bit(x) for x in arg] + return value + def stringArrayForC( self, strings ): + """Create a ctypes pointer to char-pointer set""" + from OpenGL import arrays + result = (ctypes.c_char_p * len(strings))() + for i,s in enumerate(strings): + result[i] = ctypes.cast( + arrays.GLcharARBArray.dataPointer(s), + ctypes.c_char_p, + ) + return result diff --git a/vllm/lib/python3.10/site-packages/OpenGL/error.py b/vllm/lib/python3.10/site-packages/OpenGL/error.py new file mode 100644 index 0000000000000000000000000000000000000000..f0143584a1d90b992d8b0273b778fc08e4d8a4cf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/error.py @@ -0,0 +1,246 @@ +"""Implementation of OpenGL errors/exceptions + +Note that OpenGL-ctypes will also throw standard errors, +such as TypeError or ValueError when appropriate. + +ErrorChecker is an _ErrorChecker instance that allows you +to register a new error-checking function for use +throughout the system. 
+""" +import logging +_log = logging.getLogger( 'OpenGL.error' ) +from OpenGL import platform, _configflags +from ctypes import ArgumentError +__all__ = ( + "Error",'GLError','GLUError','GLUTError', + 'GLerror','GLUerror','GLUTerror','ArgumentError', +) + +class Error( Exception ): + """Base class for all PyOpenGL-specific exception classes""" +class NoContext( Error ): + """Raised to indicate that there is no currently active context + + Technically almost *any* OpenGL call can segfault if there is + no active context. The OpenGL.CHECK_CONTEXT flag, if enabled + will cause this error to be raised whenever a GL or GLU call is + issued (via PyOpenGL) if there is no currently valid context. + """ +class CopyError( Error ): + """Raised to indicate that operation requires data-copying + + if you set: + OpenGL.ERROR_ON_COPY = True + + before importing OpenGL.GL, this error will be raised when + a passed argument would require a copy to be made. + """ + +class NullFunctionError( Error ): + """Error raised when an undefined function is called""" + +class GLError( Error ): + """OpenGL core error implementation class + + Primary purpose of this error class is to allow for + annotating an error with more details about the calling + environment so that it's easier to debug errors in the + wrapping process. 
+ + Attributes: + + err -- the OpenGL error code for the error + result -- the OpenGL result code for the operation + baseOperation -- the "function" being called + pyArgs -- the translated set of Python arguments + cArgs -- the Python objects matching 1:1 the C arguments + cArguments -- ctypes-level arguments to the operation, + often raw integers for pointers and the like + description -- OpenGL description of the error (textual) + """ + def __init__( + self, + err=None, + result=None, + cArguments=None, + baseOperation=None, + pyArgs=None, + cArgs=None, + description=None, + ): + """Initialise the GLError, storing metadata for later display""" + ( + self.err, self.result, self.cArguments, + self.baseOperation, self.pyArgs, self.cArgs, + self.description + ) = ( + err, result, cArguments, + baseOperation, pyArgs, cArgs, + description + ) + DISPLAY_ORDER = ( + 'err', + 'description', + 'baseOperation', + 'pyArgs', + 'cArgs', + 'cArguments', + 'result', + ) + def __str__( self ): + """Create a fully formatted representation of the error""" + args = [] + for property in self.DISPLAY_ORDER: + value = getattr( self, property, None ) + if value is not None or property=='description': + formatFunction = 'format_%s'%(property) + if hasattr( self, formatFunction ): + args.append( getattr(self,formatFunction)( property, value )) + else: + args.append( '%s = %s'%( + property, + self.shortRepr( value ), + )) + return '%s(\n\t%s\n)'%(self.__class__.__name__, ',\n\t'.join( + [x for x in args if x] + )) + def __repr__( self ): + """Produce a much shorter version of the error as a string""" + return '%s( %s )'%( + self.__class__.__name__, + ", ".join([x for x in [ + 'err=%s'%(self.err), + self.format_description( 'description', self.description ) or '', + self.format_baseOperation( 'baseOperation', self.baseOperation ) or '', + ] if x]) + ) + def format_description( self, property, value ): + """Format description using GLU's gluErrorString""" + if value is None and self.err is 
not None: + try: + from OpenGL.GLU import gluErrorString + self.description = value = gluErrorString( self.err ) + except Exception as err: + return None + if value is None: + return None + return '%s = %s'%( + property, + self.shortRepr( value ), + ) + def shortRepr( self, value, firstLevel=True ): + """Retrieve short representation of the given value""" + if isinstance( value, (list,tuple) ) and value and len(repr(value))>=40: + if isinstance( value, list ): + template = '[\n\t\t%s\n\t]' + else: + template = '(\n\t\t%s,\n\t)' + return template%( ",\n\t\t".join( + [ + self.shortRepr(x,False) for x in value + ] + )) + r = repr( value ) + if len(r) < 120: + return r + else: + return r[:117] + '...' + def format_baseOperation( self, property, value ): + """Format a baseOperation reference for display""" + if hasattr( value, '__name__' ): + return '%s = %s'%( property, value.__name__ ) + else: + return '%s = %r'%( property, value ) + +class GLUError( Error ): + """GLU error implementation class""" + +class GLUTError( Error ): + """GLUT error implementation class""" + + +if _configflags.ERROR_CHECKING: + from OpenGL import acceleratesupport + _ErrorChecker = None + if acceleratesupport.ACCELERATE_AVAILABLE: + try: + from OpenGL_accelerate.errorchecker import _ErrorChecker + except ImportError as err: + _log.warn( """OpenGL_accelerate seems to be installed, but unable to import error checking entry point!""" ) + if _ErrorChecker is None: + class _ErrorChecker( object ): + """Per-API error-checking object + + Attributes: + _registeredChecker -- the checking function enabled when + not doing onBegin/onEnd processing + _currentChecker -- currently active checking function + """ + _getErrors = None + def __init__( self, platform, baseOperation=None, noErrorResult=0 ): + """Initialize from a platform module/reference""" + self._isValid = platform.CurrentContextIsValid + self._getErrors = baseOperation + self._noErrorResult = noErrorResult + if self._getErrors: + if 
_configflags.CONTEXT_CHECKING: + self._registeredChecker = self.safeGetError + else: + self._registeredChecker = self._getErrors + else: + self._registeredChecker = self.nullGetError + self._currentChecker = self._registeredChecker + def __bool__( self ): + """We are "true" if we actually do anything""" + if self._registeredChecker is self.nullGetError: + return False + return True + def safeGetError( self ): + """Check for error, testing for context before operation""" + if self._isValid(): + return self._getErrors() + return None + def nullGetError( self ): + """Used as error-checker when no error checking should be done""" + return self._noErrorResult + def glCheckError( + self, + result, + baseOperation=None, + cArguments=None, + *args + ): + """Base GL Error checker compatible with new ctypes errcheck protocol + + This function will raise a GLError with just the calling information + available at the C-calling level, i.e. the error code, cArguments, + baseOperation and result. Higher-level code is responsible for any + extra annotations. + + Note: + glCheckError relies on glBegin/glEnd interactions to + prevent glGetError being called during a glBegin/glEnd + sequence. If you are calling glBegin/glEnd in C you + should call onBegin and onEnd appropriately. 
+ """ + err = self._currentChecker() + if err != self._noErrorResult: + raise GLError( + err, + result, + cArguments = cArguments, + baseOperation = baseOperation, + ) + return result + def onBegin( self ): + """Called by glBegin to record the fact that glGetError won't work""" + self._currentChecker = self.nullGetError + def onEnd( self ): + """Called by glEnd to record the fact that glGetError will work""" + self._currentChecker = self._registeredChecker +else: + _ErrorChecker = None +# Compatibility with PyOpenGL 2.x series +GLUerror = GLUError +GLerror = GLError +GLUTerror = GLUTError diff --git a/vllm/lib/python3.10/site-packages/OpenGL/extensions.py b/vllm/lib/python3.10/site-packages/OpenGL/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..34402aa142c2ef31bd52d4e6468960015e16675f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/extensions.py @@ -0,0 +1,256 @@ +"""Extension module support methods + +This module provides the tools required to check whether +an extension is available +""" +from OpenGL.latebind import LateBind +from OpenGL._bytes import bytes,unicode,as_8_bit +import OpenGL as root +import sys +import logging +_log = logging.getLogger( 'OpenGL.extensions' ) +VERSION_PREFIX = as_8_bit('GL_VERSION_GL_') +CURRENT_GL_VERSION = None +AVAILABLE_GL_EXTENSIONS = [] +AVAILABLE_GLU_EXTENSIONS = [] + +# version tuple -> list of implicitly included extensions... 
+VERSION_EXTENSIONS = [ + ((3,0), [ + as_8_bit('GL_ARB_vertex_array_object'), + as_8_bit('GL_ARB_texture_buffer_object'), + as_8_bit('GL_ARB_framebuffer_object'), + as_8_bit('GL_ARB_map_buffer_range'), + ]), + ((3,1), [ + as_8_bit('GL_ARB_copy_buffer'), + as_8_bit('GL_ARB_uniform_buffer_object'), + ]), + ((3,2), [ + as_8_bit('GL_ARB_draw_elements_base_vertex'), + as_8_bit('GL_ARB_provoking_vertex'), + as_8_bit('GL_ARB_sync'), + as_8_bit('GL_ARB_texture_multisample'), + ]), + ((3,3), [ + as_8_bit('GL_ARB_texture_multisample'), + as_8_bit('GL_ARB_blend_func_extended'), + as_8_bit('GL_ARB_sampler_objects'), + as_8_bit('GL_ARB_explicit_attrib_location'), + as_8_bit('GL_ARB_occlusion_query2'), + as_8_bit('GL_ARB_shader_bit_encoding'), + as_8_bit('GL_ARB_texture_rgb10_a2ui'), + as_8_bit('GL_ARB_texture_swizzle'), + as_8_bit('GL_ARB_timer_query'), + as_8_bit('GL_ARB_vertex_type_2_10_10_10_rev'), + ]), + ((4,0), [ + as_8_bit('GL_ARB_texture_query_lod'), + as_8_bit('GL_ARB_draw_indirect'), + as_8_bit('GL_ARB_gpu_shader5'), + as_8_bit('GL_ARB_gpu_shader_fp64'), + as_8_bit('GL_ARB_shader_subroutine'), + as_8_bit('GL_ARB_tessellation_shader'), + as_8_bit('GL_ARB_texture_buffer_object_rgb32'), + as_8_bit('GL_ARB_texture_cube_map_array'), + as_8_bit('GL_ARB_texture_gather'), + as_8_bit('GL_ARB_transform_feedback2'), + as_8_bit('GL_ARB_transform_feedback3'), + ]), + ((4,1), [ + as_8_bit('GL_ARB_ES2_compatibility'), + as_8_bit('GL_ARB_get_program_binary'), + as_8_bit('GL_ARB_separate_shader_objects'), + as_8_bit('GL_ARB_shader_precision'), + as_8_bit('GL_ARB_vertex_attrib_64bit'), + as_8_bit('GL_ARB_viewport_array'), + ]), + ((4,2), [ + as_8_bit('GL_ARB_base_instance'), + as_8_bit('GL_ARB_shading_language_420pack'), + as_8_bit('GL_ARB_transform_feedback_instanced'), + as_8_bit('GL_ARB_compressed_texture_pixel_storage'), + as_8_bit('GL_ARB_conservative_depth'), + as_8_bit('GL_ARB_internalformat_query'), + as_8_bit('GL_ARB_map_buffer_alignment'), + 
as_8_bit('GL_ARB_shader_atomic_counters'), + as_8_bit('GL_ARB_shader_image_load_store'), + as_8_bit('GL_ARB_shading_language_packing'), + as_8_bit('GL_ARB_texture_storage'), + ]), +] + +class ExtensionQuerier( object ): + prefix = None + version_prefix = None + assumed_version = [1,0] + + version = extensions = None + version_string = extensions_string = None + + registered = [] + def __init__( self ): + self.registered.append( self ) + + @classmethod + def hasExtension( self, specifier ): + for registered in self.registered: + result = registered( specifier ) + if result: + return result + return False + + def __call__( self, specifier ): + specifier = as_8_bit(specifier).replace(as_8_bit('.'),as_8_bit('_')) + if not specifier.startswith( self.prefix ): + return None + + if specifier.startswith( self.version_prefix ): + specifier = [ + int(x) + for x in specifier[ len(self.version_prefix):].split(as_8_bit('_')) + ] + if specifier[:2] <= self.assumed_version: + return True + version = self.getVersion() + if not version: + return version + return specifier <= version + else: + extensions = self.getExtensions() + return extensions and specifier in extensions + def getVersion( self ): + if not self.version: + self.version = self.pullVersion() + return self.version + def getExtensions( self ): + if not self.extensions: + self.extensions = self.pullExtensions() + return self.extensions + +class _GLQuerier( ExtensionQuerier ): + prefix = as_8_bit('GL_') + version_prefix = as_8_bit('GL_VERSION_GL_') + assumed_version = [1,1] + def pullVersion( self ): + """Retrieve 2-int declaration of major/minor GL version + + returns [int(major),int(minor)] or False if not loaded + """ + from OpenGL import platform + if not platform.PLATFORM.CurrentContextIsValid(): + return False + from OpenGL.raw.GL.VERSION.GL_1_1 import glGetString + from OpenGL.raw.GL.VERSION.GL_1_1 import GL_VERSION + new = glGetString( GL_VERSION ) + + self.version_string = new + if new: + return [ + int(x) for x 
in new.split(as_8_bit(' '),1)[0].split( as_8_bit('.') ) + ] + else: + return False # not yet loaded/supported + def pullExtensions( self ): + from OpenGL import platform + if not platform.PLATFORM.CurrentContextIsValid(): + return False + from OpenGL.raw.GL._types import GLint + from OpenGL.raw.GL.VERSION.GL_1_1 import glGetString, glGetError + from OpenGL.raw.GL.VERSION.GL_1_1 import GL_EXTENSIONS + from OpenGL import error + try: + extensions = glGetString( GL_EXTENSIONS ) + if glGetError(): + raise error.GLError() + if extensions: + extensions = extensions.split() + else: + return False + except (AttributeError, error.GLError) as err: + # OpenGL 3.0 deprecates glGetString( GL_EXTENSIONS ) + from OpenGL.raw.GL.VERSION.GL_3_0 import GL_NUM_EXTENSIONS, glGetStringi + from OpenGL.raw.GL.VERSION.GL_1_1 import glGetIntegerv + count = GLint() + glGetIntegerv( GL_NUM_EXTENSIONS, count ) + extensions = [] + for i in range( count.value ): + extension = glGetStringi( GL_EXTENSIONS, i ) + extensions.append( + extension + ) + # Add included-by-reference extensions... + version = self.getVersion() + if not version: + # should not be possible? 
+ return version + check = tuple( version[:2] ) + for (v,v_exts) in VERSION_EXTENSIONS: + if v <= check: + for v_ext in v_exts: + if v_ext not in extensions: + extensions.append( as_8_bit(v_ext) ) + else: + break + return extensions +GLQuerier = _GLQuerier() +class _GLUQuerier( ExtensionQuerier ): + prefix = as_8_bit('GLU_') + version_prefix = as_8_bit('GLU_VERSION_GL_') + def pullVersion( self ): + from OpenGL.GLU import gluGetString,GLU_VERSION + return [ + int(x) for x in gluGetString( GLU_VERSION ).split('_') + if x.isdigit() + ] + def pullExtensions( self ): + from OpenGL.GLU import gluGetString,GLU_EXTENSIONS + return gluGetString( GLU_EXTENSIONS ).split() +GLUQuerier = _GLUQuerier() + +def hasExtension( specifier ): + return ExtensionQuerier.hasExtension( specifier ) +hasGLExtension = hasGLUExtension = hasExtension + +class _Alternate( LateBind ): + def __init__( self, name, *alternates ): + """Initialize set of alternative implementations of the same function""" + self.__name__ = name + self._alternatives = alternates + if root.MODULE_ANNOTATIONS: + frame = sys._getframe().f_back + if frame and frame.f_back and '__name__' in frame.f_back.f_globals: + self.__module__ = frame.f_back.f_globals['__name__'] + def __bool__( self ): + from OpenGL import error + try: + return bool( self.getFinalCall()) + except error.NullFunctionError as err: + return False + __nonzero__ = __bool__ # Python 2.6 compatibility + def finalise( self ): + """Call, doing a late lookup and bind to find an implementation""" + for alternate in self._alternatives: + if alternate: +# _log.info( +# """Chose alternate: %s from %s""", +# alternate.__name__, +# ", ".join([x.__name__ for x in self._alternatives]) +# ) + return alternate + from OpenGL import error + raise error.NullFunctionError( + """Attempt to call an undefined alternate function (%s), check for bool(%s) before calling"""%( + ', '.join([x.__name__ for x in self._alternatives]), + self.__name__, + ) + ) +def alternate( name, 
*functions ): + """Construct a callable that functions as the first implementation found of given set of alternatives + + if name is a function then its name will be used.... + """ + if not isinstance( name, (bytes,unicode)): + functions = (name,)+functions + name = name.__name__ + return type( name, (_Alternate,), {} )( name, *functions ) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/images.py b/vllm/lib/python3.10/site-packages/OpenGL/images.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b3f72d306b6bd0b9aa644d4f9dfe366e6e7056 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/images.py @@ -0,0 +1,160 @@ +"""Image/texture implementation code + +This module provides the Pan-OpenGL operations required to support OpenGL +image handling. Most of this code is simply boilerplate code that sets +OpenGL parameters such that normal Pythonic assumptions about data-ordering +are met to allow easier interaction with other projects (such as PIL or +Numpy). + +Generally speaking, there are 3 pieces of information which control how +an image is processed in the system: + + format -- this is the pixel format, such as GL_RGB/GL_RED/GL_ABGR_EXT + dims -- tuple of dimensions for the image, (width,height,depth) order + type -- the storage data-type for the image, normally GL_UNSIGNED_BYTE + when working in Python, but all of the standard OpenGL types for + images can be used if you happen to have your data in some exotic + format. + + OpenGL.UNSIGNED_BYTE_IMAGES_AS_STRING -- if this global value is set, + then read of unsigned byte images using glReadPixels and + glGetTexImage produces a string instead of the default array format. + +Attributes of Note: + + COMPONENT_COUNTS -- used to lookup how many units of a + given storage type are required to store a unit in a given format + + TYPE_TO_ARRAYTYPE -- maps Image storage types to their array data-type + constants, i.e. 
maps GL_UNSIGNED_SHORT_4_4_4_4 to GL_UNSIGNED_SHORT + so that we can use the standard array types for manipulating + image arrays. + + RANK_PACKINGS -- commands required to set up default array-transfer + operations for an array of the specified rank. + +New image formats and types will need to be registered here to be supported, +this means that extension modules which add image types/formats need to alter +the tables described above! + + XXX Should be an API to handle that instead of direct modification. + +""" +from OpenGL.raw.GL.VERSION import GL_1_1 as _simple +from OpenGL import arrays +from OpenGL import _configflags +import ctypes + +def SetupPixelRead( format, dims, type): + """Setup transfer mode for a read into a numpy array return the array + + Calls setupDefaultTransferMode, sets rankPacking and then + returns a createTargetArray for the parameters. + """ + setupDefaultTransferMode() + # XXX this is wrong? dims may grow or it may not, depends on whether + # the format can fit in the type or not, but rank is a property of the + # image itself? Don't know, should test. + rankPacking( len(dims)+1 ) + return createTargetArray( format, dims, type ) + +def setupDefaultTransferMode( ): + """Set pixel transfer mode to assumed internal structure of arrays + + Basically OpenGL-ctypes (and PyOpenGL) assume that your image data is in + non-byte-swapped order, with big-endian ordering of bytes (though that + seldom matters in image data). These assumptions are normally correct + when dealing with Python libraries which expose byte-arrays. + """ + _simple.glPixelStorei(_simple.GL_PACK_SWAP_BYTES, 0) + _simple.glPixelStorei(_simple.GL_PACK_LSB_FIRST, 0) +def rankPacking( rank ): + """Set the pixel-transfer modes for a given image "rank" (# of dims) + + Uses RANK_PACKINGS table to issue calls to glPixelStorei + """ + for func,which,arg in RANK_PACKINGS[rank]: + try: + func(which,arg) + except Exception as err: + # XXX should be logging a warning! 
+ pass + +def createTargetArray( format, dims, type ): + """Create storage array for given parameters + + If storage type requires > 1 unit per format pixel, then dims will be + extended by 1, so in the common case of RGB and GL_UNSIGNED_BYTE you + will wind up with an array of dims + (3,) dimensions. See + COMPONENT_COUNTS for table which controls which formats produce + larger dimensions. The secondary table TIGHT_PACK_FORMATS overrides + this case, so that image formats registered as TIGHT_PACK_FORMATS + only ever return a dims-shaped value. TIGHT_PACK_FORMATS will raise + ValueErrors if they are used with a format that does not have the same + number of components as they define. + + Note that the base storage type must provide a zeros method. The zeros + method relies on their being a registered default array-implementation for + the storage type. The default installation of OpenGL-ctypes will use + Numpy arrays for returning the result. + """ + # calculate the number of storage elements required to store + # a single pixel of format, that's the dimension of the resulting array + componentCount = formatToComponentCount( format ) + if componentCount > 1: + if type not in TIGHT_PACK_FORMATS: + # requires multiple elements to store a single pixel (common) + # e.g. 
byte array (typeBits = 8) with RGB (24) or RGBA (32) + dims += (componentCount, ) + elif TIGHT_PACK_FORMATS[ type ] < componentCount: + raise ValueError( + """Image type: %s supports %s components, but format %s requires %s components"""%( + type, + TIGHT_PACK_FORMATS[ type ], + format, + componentCount, + ) + ) + arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ TYPE_TO_ARRAYTYPE.get(type,type) ] + return arrayType.zeros( dims ) + +def formatToComponentCount( format ): + """Given an OpenGL image format specification, get components/pixel""" + size = COMPONENT_COUNTS.get( format ) + if size is None: + raise ValueError( """Unrecognised image format: %r"""%(format,)) + return size + +def returnFormat( data, type ): + """Perform compatibility conversion for PyOpenGL 2.x image-as string results + + Uses OpenGL.UNSIGNED_BYTE_IMAGES_AS_STRING to control whether to perform the + conversions. + """ + if _configflags.UNSIGNED_BYTE_IMAGES_AS_STRING: + if type == _simple.GL_UNSIGNED_BYTE: + if hasattr( data, 'tostring' ): + return data.tostring() + elif hasattr( data, 'raw' ): + return data.raw + elif hasattr( data, '_type_' ): + s = ctypes.string_at( ctypes.cast( data, ctypes.c_voidp ), ctypes.sizeof( data )) + result = s[:] # copy into a new string + return s + return data + + +COMPONENT_COUNTS = { + # Image-format-constant: number-of-components (integer) +} +TYPE_TO_BITS = { + # GL-image-storage-type-constant: number-of-bits (integer) +} +TYPE_TO_ARRAYTYPE = { + # GL-image-storage-type-constant: GL-datatype (constant) +} +TIGHT_PACK_FORMATS = { +} +RANK_PACKINGS = { + # rank (integer): list of (function,**arg) to setup for that rank +} diff --git a/vllm/lib/python3.10/site-packages/OpenGL/latebind.py b/vllm/lib/python3.10/site-packages/OpenGL/latebind.py new file mode 100644 index 0000000000000000000000000000000000000000..dc28dc28a539753f3ea36ed5b7344abc9873e5be --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/latebind.py @@ -0,0 +1,61 @@ +"""Late-bound base-class 
(with acceleration)""" +from OpenGL import acceleratesupport +LateBind = Curry = None +if acceleratesupport.ACCELERATE_AVAILABLE: + try: + from OpenGL_accelerate.latebind import LateBind, Curry + except ImportError as err: + pass +if LateBind is None: + class LateBind(object): + """Provides a __call__ which dispatches to self._finalCall + + When called without self._finalCall() makes a call to + self.finalise() and then calls self._finalCall() + """ + _finalCall = None + def setFinalCall( self, finalCall ): + """Set our finalCall to the callable object given""" + self._finalCall = finalCall + def getFinalCall( self ): + """Retrieve and/or bind and retrieve final call""" + if not self._finalCall: + self._finalCall = self.finalise() + return self._finalCall + + + def finalise( self ): + """Finalise our target to our final callable object + + return final callable + """ + + def __call__( self, *args, **named ): + """Call self._finalCall, calling finalise() first if not already called + + There's actually *no* reason to unpack and repack the arguments, + but unfortunately I don't know of a Cython syntax to specify + that. + """ + try: + return self._finalCall( *args, **named ) + except (TypeError,AttributeError) as err: + if self._finalCall is None: + self._finalCall = self.finalise() + return self._finalCall( *args, **named ) +if Curry is None: + class Curry(object): + """Provides a simple Curry which can bind (only) the first element + + This is used by lazywrapper, which explains the weird naming + of the two attributes... 
+ """ + wrapperFunction = None + baseFunction = None + def __init__( self, wrapperFunction, baseFunction ): + """Stores self.wrapperFunction and self.baseFunction""" + self.baseFunction = baseFunction + self.wrapperFunction = wrapperFunction + def __call__( self, *args, **named ): + """returns self.wrapperFunction( self.baseFunction, *args, **named )""" + return self.wrapperFunction( self.baseFunction, *args, **named ) diff --git a/vllm/lib/python3.10/site-packages/OpenGL/lazywrapper.py b/vllm/lib/python3.10/site-packages/OpenGL/lazywrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..4782738ff42e840336c7c1fb011b4d82e24b5516 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/lazywrapper.py @@ -0,0 +1,58 @@ +"""Simplistic wrapper decorator for Python-coded wrappers""" +from OpenGL.latebind import Curry +from OpenGL import MODULE_ANNOTATIONS + +class _LazyWrapper( Curry ): + """Marker to tell us that an object is a lazy wrapper""" + +def lazy( baseFunction ): + """Produce a lazy-binding decorator that uses baseFunction + + Allows simple implementation of wrappers where the + whole of the wrapper can be summed up as do 1 thing + then call base function with the cleaned up result. + + Passes baseFunction in as the first argument of the + wrapped function, all other parameters are passed + unchanged. The wrapper class created has __nonzero__ + and similar common wrapper entry points defined. 
+ """ + def wrap( wrapper ): + """Wrap wrapper with baseFunction""" + def __nonzero__( self ): + return bool( baseFunction ) + def __repr__( self ): + return '%s( %r )'%( + 'OpenGL.lazywrapper.lazy', + baseFunction.__name__, + ) + _with_wrapper = type( wrapper.__name__, (_LazyWrapper,), { + '__repr__': __repr__, + '__doc__': wrapper.__doc__, + '__nonzero__': __nonzero__, + 'wrappedOperation': baseFunction, + 'restype': getattr(wrapper, 'restype',getattr(baseFunction,'restype',None)), + } ) + with_wrapper = _with_wrapper(wrapper,baseFunction) + with_wrapper.__name__ = wrapper.__name__ + if hasattr( baseFunction, '__module__' ): + with_wrapper.__module__ = baseFunction.__module__ + return with_wrapper + return wrap + + +if __name__ == "__main__": + from OpenGL.raw import GLU + func = GLU.gluNurbsCallbackData + output = [] + def testwrap( base ): + "Testing" + output.append( base ) + testlazy = lazy( func )( testwrap ) + testlazy( ) + assert testlazy.__doc__ == "Testing" + assert testlazy.__class__.__name__ == 'testwrap' + assert testlazy.__name__ == 'testwrap' + assert testlazy.baseFunction is func + assert testlazy.wrapperFunction is testwrap + assert output diff --git a/vllm/lib/python3.10/site-packages/OpenGL/logs.py b/vllm/lib/python3.10/site-packages/OpenGL/logs.py new file mode 100644 index 0000000000000000000000000000000000000000..155e46fc39a8b17b626aea02ab70801720317584 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/logs.py @@ -0,0 +1,91 @@ +"""Fix missing-API problems in logging module (circa Python 2.3) + +Adds constants to the log objects. +Adds getException(err) to log objects to retrieve +formatted exception or err if traceback not available. 
+""" +import traceback, logging +from OpenGL._configflags import ERROR_LOGGING, FULL_LOGGING +getLog = logging.getLogger + +def getException(error): + """Get formatted traceback from exception""" + try: + return traceback.format_exc( limit=10 ) + except Exception as err: + return str( error ) + +logging.Logger.getException = staticmethod( getException ) +logging.Logger.err = logging.Logger.error +logging.Logger.DEBUG = logging.DEBUG +logging.Logger.WARN = logging.WARN +logging.Logger.INFO = logging.INFO +logging.Logger.ERR = logging.Logger.ERROR = logging.ERROR + +if FULL_LOGGING: + getLog( 'OpenGL.calltrace' ).setLevel( logging.INFO ) + +class _LoggedFunction( object ): + """Proxy that overrides __call__ to log arguments""" + def __init__( self, base, log ): + self.__dict__[''] = base + self.__dict__['log'] = log + def __setattr__( self, key, value ): + if key != '': + setattr( self.__dict__[''], key, value ) + else: + self.__dict__[''] = value + def __getattr__( self, key ): + if key == '': + return self.__dict__[''] + else: + return getattr( self.__dict__[''], key ) +class _FullLoggedFunction( _LoggedFunction ): + """Fully-logged function wrapper (logs all call params to OpenGL.calltrace)""" + _callTrace = getLog( 'OpenGL.calltrace' ) + def __call__( self, *args, **named ): + argRepr = [] + function = getattr( self, '' ) + for arg in args: + argRepr.append( repr(arg) ) + for key,value in named.items(): + argRepr.append( '%s = %s'%( key,repr(value)) ) + argRepr = ",".join( argRepr ) + self._callTrace.info( '%s( %s )', function.__name__, argRepr ) + try: + return function( *args, **named ) + except Exception as err: + self.log.warn( + """Failure on %s: %s""", function.__name__, self.log.getException( err ) + ) + raise +class _ErrorLoggedFunction ( _LoggedFunction ): + """On-error-logged function wrapper""" + def __call__( self, *args, **named ): + function = getattr( self, '' ) + try: + return function( *args, **named ) + except Exception as err: + self.log.warn( 
+ """Failure on %s: %s""", function.__name__, self.log.getException( err ) + ) + raise + + +def logOnFail( function, log ): + """Produce possible log-wrapped version of function + + function -- callable object to be wrapped + log -- the log to which to log information + + Uses ERROR_LOGGING and FULL_LOGGING + to determine whether/how to wrap the function. + """ + if ERROR_LOGGING or FULL_LOGGING: + if FULL_LOGGING: + loggedFunction = _FullLoggedFunction( function, log ) + else: + loggedFunction = _ErrorLoggedFunction( function, log ) + return loggedFunction + else: + return function diff --git a/vllm/lib/python3.10/site-packages/OpenGL/plugins.py b/vllm/lib/python3.10/site-packages/OpenGL/plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..37d327cbdd9aa38bda90638948595b71088b78f2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/plugins.py @@ -0,0 +1,69 @@ +"""Simple plug-in mechanism to provide replacement for setuptools plugins""" + +class Plugin( object ): + """Base class for plugins to be loaded""" + loaded = False + def __init__( self, name, import_path, check = None, **named ): + """Register the plug-in""" + self.name = name + self.import_path = import_path + self.check = check + self.registry.append( self ) + self.__dict__.update( named ) + def load( self ): + """Attempt to load and return our entry point""" + try: + return importByName( self.import_path ) + except ImportError as err: + return None + @classmethod + def match( cls, *args ): + """Match to return the plugin which is appropriate to load""" + @classmethod + def all( cls ): + """Iterate over all registered plugins""" + return cls.registry[:] + @classmethod + def by_name( cls, name ): + for instance in cls.all(): + if instance.name == name: + return instance + return None + +def importByName( fullName ): + """Import a class by name""" + name = fullName.split(".") + moduleName = name[:-1] + className = name[-1] + module = __import__( ".".join(moduleName), {}, 
{}, moduleName) + return getattr( module, className ) + + +class PlatformPlugin( Plugin ): + """Platform-level plugin registration""" + registry = [] + @classmethod + def match( cls, key ): + """Determine what platform module to load + + key -- (sys.platform,os.name) key to load + """ + for possible in key: + # prefer sys.platform, *then* os.name + for plugin in cls.registry: + if plugin.name == possible: + return plugin + raise KeyError( """No platform plugin registered for %s"""%(key,)) + +class FormatHandler( Plugin ): + """Data-type storage-format handler""" + registry = [] + @classmethod + def match( cls, value ): + """Lookup appropriate handler based on value (a type)""" + key = '%s.%s'%( value.__module__, value.__name__ ) + for plugin in cls.registry: + set = getattr( plugin, 'check', ()) + if set and key in set: + return plugin + return None diff --git a/vllm/lib/python3.10/site-packages/OpenGL/version.py b/vllm/lib/python3.10/site-packages/OpenGL/version.py new file mode 100644 index 0000000000000000000000000000000000000000..8fdaf6055b88a9e5b6574b3d0516216798dfefc6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/version.py @@ -0,0 +1,2 @@ +"""Declares the current version for use in setuptools and the like""" +__version__ = '3.1.0' diff --git a/vllm/lib/python3.10/site-packages/OpenGL/wrapper.py b/vllm/lib/python3.10/site-packages/OpenGL/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..e78299ec038c3d0b55fa5ffdb84c23e737b635cc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/OpenGL/wrapper.py @@ -0,0 +1,1487 @@ +"""The wrapping code for providing natural ctypes-based OpenGL interface""" +import ctypes, logging +from OpenGL import platform, error +from OpenGL._configflags import STORE_POINTERS, ERROR_ON_COPY, SIZE_1_ARRAY_UNPACK +from OpenGL import converters +from OpenGL.converters import DefaultCConverter +from OpenGL.converters import returnCArgument,returnPyArgument +from OpenGL.latebind import LateBind 
+from OpenGL.arrays import arrayhelpers, arraydatatype +from OpenGL._null import NULL +_log = logging.getLogger( 'OpenGL.wrapper' ) + +from OpenGL import acceleratesupport +cWrapper = None +if acceleratesupport.ACCELERATE_AVAILABLE: + try: + from OpenGL_accelerate.latebind import LateBind + from OpenGL_accelerate.wrapper import ( + Wrapper as cWrapper, + CArgCalculator, + PyArgCalculator, + CArgumentCalculator, + ) + except ImportError as err: + _log.warn( """OpenGL_accelerate seems to be installed, but unable to import expected wrapper entry points!""" ) + +if not STORE_POINTERS: + if not ERROR_ON_COPY: + _log.error( """You've specified (not STORE_POINTERS) yet ERROR_ON_COPY is False, this would cause segfaults, so (not STORE_POINTERS) is being ignored""" ) + STORE_POINTERS = True + + +def asList( o ): + """Convert to a list if not already one""" + if not isinstance( o, list ): + return list(o) + return o + +def none_or_pass( incoming, function, arguments ): + return incoming +none_or_pass.optional=True + +class Wrapper( LateBind ): + """Wrapper around a ctypes cFunction object providing SWIG-like hooks + + Attributes: + + wrappedOperation -- base operation, normally a ctypes function + with data-types and error-checking specified + pyConverters -- converters for incoming Python arguments, + provide 1:1 mapping to incoming Python arguments, can + suppress an argument from the argument-set as well + see setPyConverter + pyConverterNames -- caching/storage of the argument names + for the Python converters + cConverters -- converters for incoming C-level arguments + produce Python-level objects in 1:1 mapping to ctypes + arguments from pyConverters results + see setCConverter + cResolvers -- converters turning Python-level objects into + ctypes-compatible data-types + see setCResolver + + Generic Attributes: + + {ARG1}_LOOKUP_{ARG2} -- lookup dictionaries to provide sizes for + ARG1 output value from the value of ARG2, provided for + documentation/reference + 
{ARG1}_FROM_{ARG2} -- lookup functions to provide sizes for ARG1 + output value from the value of ARG2, provided for + documentation/reference + """ + localProperties = ( + 'wrappedOperation', + '__file__', + 'pyConverters', + 'pyConverterNames', + 'cConverters', + 'cResolvers', + 'storeValues', + 'returnValues', + '_finalCall', + ) + def __init__( self, wrappedOperation ): + """Initialise the wrapper, storing wrappedOperation""" + if isinstance( wrappedOperation, Wrapper ): + wrappedOperation = wrappedOperation.wrappedOperation + self.wrappedOperation = wrappedOperation + def __getattr__( self, key ): + """Delegate attribute lookup to our wrappedOperation""" + if key != 'wrappedOperation': + return getattr( self.wrappedOperation, key ) + raise AttributeError( key ) + def __nonzero__( self ): + """Is this function/wrapper available?""" + return bool( self.wrappedOperation ) + __bool__ = __nonzero__ + def __setattr__( self, key, value ): + """Forward attribute setting to our wrappedOperation""" + if key in self.localProperties: + super( Wrapper, self ).__setattr__( key, value ) + else: + return setattr( self.wrappedOperation, key, value ) + def pyArgIndex( self, argName ): + """Return the Python-argument index for the given argument name""" + argNames = getattr( self, 'pyConverterNames', None ) + if argNames is None: + argNames = self.wrappedOperation.argNames + try: + return asList( argNames ).index( argName ) + except (ValueError,IndexError) as err: + raise KeyError( """No argument %r in argument list %r"""%( + argName, argNames + )) + def cArgIndex( self, argName ): + """Return the C-argument index for the given argument name""" + argNames = self.wrappedOperation.argNames + try: + return asList( argNames ).index( argName ) + except (ValueError,IndexError) as err: + raise KeyError( """No argument %r in argument list %r"""%( + argName, argNames + )) + def setOutput( + self, outArg, size=(1,), pnameArg=None, + arrayType=None, oldStyleReturn=SIZE_1_ARRAY_UNPACK, + 
orPassIn = False, + ): + """Set the given argName to be an output array + + size -- either a tuple compatible with arrayType.zeros or + a function taking pname to produce such a value. + arrayType -- array data-type used to generate the output + array using the zeros class method... + pnameArg -- optional argument passed into size function, that + is, the name of the argument whose *value* will be passed + to the size function, often the name of an input argument + to be "sized" to match the output argument. + """ + if arrayType is None: + # figure out from self.wrappedOperation's argtypes + index = self.cArgIndex( outArg ) + arrayType = self.wrappedOperation.argtypes[ index ] + if not hasattr( arrayType, 'asArray' ): + if arrayType == ctypes.c_void_p: + from OpenGL.arrays import GLubyteArray + arrayType = GLubyteArray + else: + raise TypeError( "Should only have array types for output parameters" ) + if pnameArg is None: + assert not hasattr(size,'__call__' ) + if orPassIn: + cls = converters.OutputOrInput + else: + cls = converters.Output + conv = cls( + name=outArg, + size=size, + arrayType=arrayType, + ) + else: + if isinstance( size, dict ): + setattr( self, '%s_LOOKUP_%s'%(outArg,pnameArg), size ) + size = size.__getitem__ + else: + setattr( self, '%s_FROM_%s'%(outArg,pnameArg), size ) + assert hasattr( size, '__call__' ) + if orPassIn: + cls = converters.SizedOutputOrInput + else: + cls = converters.SizedOutput + conv = cls( + name=outArg, + specifier=pnameArg, + lookup=size, + arrayType=arrayType, + ) + if oldStyleReturn: + returnObject = conv.oldStyleReturn + else: + returnObject = converters.returnCArgument( outArg ) + if orPassIn: + self.setPyConverter( + outArg, none_or_pass + ) + else: + self.setPyConverter( outArg ) + return self.setCConverter( + outArg, conv, + ).setReturnValues( + returnObject + ) + def typeOfArg( self, outArg ): + """Retrieve the defined data-type for the given outArg (name)""" + index = self.cArgIndex( outArg ) + return 
self.wrappedOperation.argtypes[ index ] + + if not ERROR_ON_COPY: + def setInputArraySize( self, argName, size=None ): + """Decorate function with vector-handling code for a single argument + + if OpenGL.ERROR_ON_COPY is False, then we return the + named argument, converting to the passed array type, + optionally checking that the array matches size. + + if OpenGL.ERROR_ON_COPY is True, then we will dramatically + simplify this function, only wrapping if size is True, i.e. + only wrapping if we intend to do a size check on the array. + """ + arrayType = self.typeOfArg( argName ) + if not hasattr( arrayType, 'asArray' ): + if arrayType == ctypes.c_void_p: + # special case, we will convert to a void * array... + self.setPyConverter( + argName, + converters.CallFuncPyConverter( arraydatatype.ArrayDatatype.asArray ) + ) + self.setCConverter( argName, converters.getPyArgsName( argName ) ) + return self + elif hasattr( arrayType, '_type_' ) and hasattr(arrayType._type_, '_type_' ): + # is a ctypes array-of-pointers data-type... + # requires special handling no matter what... + return self + else: + raise TypeError( "Should only have array types for output parameters: got %s"%(arrayType,) ) + if size is not None: + self.setPyConverter( argName, arrayhelpers.asArrayTypeSize(arrayType, size) ) + else: + self.setPyConverter( argName, arrayhelpers.asArrayType(arrayType) ) + self.setCConverter( argName, converters.getPyArgsName( argName ) ) + return self + else: + def setInputArraySize( self, argName, size=None ): + """Decorate function with vector-handling code for a single argument + + if OpenGL.ERROR_ON_COPY is False, then we return the + named argument, converting to the passed array type, + optionally checking that the array matches size. + + if OpenGL.ERROR_ON_COPY is True, then we will dramatically + simplify this function, only wrapping if size is True, i.e. + only wrapping if we intend to do a size check on the array. 
+ """ + if size is not None: + arrayType = self.typeOfArg( argName ) + # return value is always the source array... + if hasattr( arrayType, 'asArray' ): + self.setPyConverter( argName, arrayhelpers.asArrayTypeSize(arrayType, size) ) + self.setCConverter( argName, + converters.getPyArgsName( argName ) + ) + return self + + def setPyConverter( self, argName, function = NULL ): + """Set Python-argument converter for given argument + + argName -- the argument name which will be coerced to a usable internal + format using the function provided. + function -- None (indicating a simple copy), NULL (default) to eliminate + the argument from the Python argument-list, or a callable object with + the signature: + + converter(arg, wrappedOperation, args) + + where arg is the particular argument on which the convert is working, + wrappedOperation is the underlying wrapper, and args is the set of + original Python arguments to the function. + + Note that you need exactly the same number of pyConverters as Python + arguments. + """ + if not hasattr( self, 'pyConverters' ): + self.pyConverters = [None]*len( self.wrappedOperation.argNames ) + self.pyConverterNames = list(self.wrappedOperation.argNames) + try: + i = asList( self.pyConverterNames ).index( argName ) + except ValueError: + raise AttributeError( """No argument named %r left in pyConverters for %r: %s"""%( + argName, self.wrappedOperation.__name__, self.pyConverterNames, + )) + if function is NULL: + del self.pyConverters[i] + del self.pyConverterNames[i] + else: + self.pyConverters[i] = function + return self + def setCConverter( self, argName, function ): + """Set C-argument converter for a given argument + + argName -- the argument name whose C-compatible representation will + be calculated with the passed function. 
+ function -- None (indicating a simple copy), a non-callable object to + be copied into the result-list itself, or a callable object with + the signature: + + converter( pyArgs, index, wrappedOperation ) + + where pyArgs is the set of passed Python arguments, with the + pyConverters already applied, index is the index of the C argument + and wrappedOperation is the underlying function. + + C-argument converters are your chance to expand/contract a Python + argument list (pyArgs) to match the number of arguments expected by + the ctypes baseOperation. You can't have a "null" C-argument converter, + as *something* has to be passed to the C-level function in the + parameter. + """ + if not hasattr( self, 'cConverters' ): + self.cConverters = [None]*len( self.wrappedOperation.argNames ) + try: + if not isinstance(self.wrappedOperation.argNames, list): + self.wrappedOperation.argNames = list( self.wrappedOperation.argNames ) + i = asList( self.wrappedOperation.argNames ).index( argName ) + except ValueError: + raise AttributeError( """No argument named %r left in cConverters: %s"""%( + argName, self.wrappedOperation.argNames, + )) + self.cConverters[i] = function + return self + def setCResolver( self, argName, function=NULL ): + """Set C-argument converter for a given argument""" + if not hasattr( self, 'cResolvers' ): + self.cResolvers = [None]*len( self.wrappedOperation.argNames ) + try: + if not isinstance(self.wrappedOperation.argNames, list): + self.wrappedOperation.argNames = list( self.wrappedOperation.argNames ) + i = asList( self.wrappedOperation.argNames).index( argName ) + except ValueError: + raise AttributeError( """No argument named %r left in cConverters: %s"""%( + argName, self.wrappedOperation.argNames, + )) + if function is NULL: + del self.cResolvers[i] + else: + self.cResolvers[i] = function + return self + def setStoreValues( self, function=NULL ): + """Set the storage-of-arguments function for the whole wrapper""" + if function is NULL or 
ERROR_ON_COPY and not STORE_POINTERS: + try: + del self.storeValues + except Exception as err: + pass + else: + self.storeValues = function + return self + def setReturnValues( self, function=NULL ): + """Set the return-of-results function for the whole wrapper""" + if function is NULL: + try: + del self.returnValues + except Exception as err: + pass + else: + self.returnValues = function + return self + + def finalise( self ): + """Finalise our various elements into simple index-based operations""" + for attribute in ('pyConverters','cConverters','cResolvers' ): + value = getattr( self, attribute, None ) + if value is not None: + for i,item in enumerate(value): + if hasattr( item, 'finalise' ): + try: + item.finalise( self ) + except Exception as err: + raise error.Error( + """Error finalising item %s in %s for %s (%r): %s"""%( + i,attribute,self,item,err, + ) + ) + if hasattr( self, 'cConverters' ): + for i,converter in enumerate( self.cConverters ): + if isinstance( converter, (type(None),DefaultCConverter )): + self.cConverters[i] = DefaultCConverter( self.pyArgIndex( self.argNames[i]) ) + for attribute in ('storeValues','returnValues',): + item = getattr( self, attribute, None ) + if hasattr( item, 'finalise' ): + item.finalise( self ) + callFunction = self.finaliseCall() + if not callFunction: + raise RuntimeError( """Missing finalised call type for %s"""%( self, )) + else: + #self.__class__.finalize = lambda *args: callFunction + #self.__call__ = callFunction + #self.__class__.__call__ = callFunction + #self.__class__.set_call( callFunction ) + #self.__class__.__dict__[ '__call__' ] = callFunction + #print 'setting class call', callFunction + self.setFinalCall( callFunction ) + return callFunction + #return self + def finaliseCall( self ): + """Produce specialised versions of call for finalised wrapper object + + This returns a version of __call__ that only does that work which is + required by the particular wrapper object + + This is essentially a huge set 
of expanded nested functions, very + inelegant... + """ + pyConverters = getattr( self, 'pyConverters', None ) + cConverters = getattr( self, 'cConverters', None ) + cResolvers = getattr( self, 'cResolvers', None ) + wrappedOperation = self.wrappedOperation + storeValues = getattr( self, 'storeValues', None ) + returnValues = getattr( self, 'returnValues', None ) + if pyConverters: + if cWrapper: + calculate_pyArgs = PyArgCalculator( + self,pyConverters, + ) + else: + pyConverters_mapped = [ + (i,converter,(converter is None)) + for (i,converter) in enumerate( pyConverters ) + ] + pyConverters_length = len([p for p in pyConverters if not getattr( p, 'optional', False)]) + def calculate_pyArgs( args ): + if pyConverters_length > len(args): + raise ValueError( + """%s requires %r arguments (%s), received %s: %r"""%( + wrappedOperation.__name__, + pyConverters_length, + ", ".join( self.pyConverterNames ), + len(args), + args + ) + ) + for index,converter,isNone in pyConverters_mapped: + if isNone: + yield args[index] + else: + try: + yield converter(args[index], self, args) + except IndexError as err: + yield NULL + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( converter, ) + raise + else: + calculate_pyArgs = None + if cConverters: + if cWrapper: + calculate_cArgs = CArgCalculator( self, cConverters ) + else: + cConverters_mapped = [ + (i,converter,hasattr(converter,'__call__')) + for (i,converter) in enumerate( cConverters ) + ] + def calculate_cArgs( pyArgs ): + for index,converter,canCall in cConverters_mapped: + if canCall: + try: + yield converter( pyArgs, index, self ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, self, + ) + raise + else: + yield converter + else: + calculate_cArgs = None + if cResolvers: + if cWrapper: + calculate_cArguments = CArgumentCalculator( cResolvers ) + else: + cResolvers_mapped = list(enumerate(cResolvers)) + def 
calculate_cArguments( cArgs ): + for i,converter in cResolvers_mapped: + if converter is None: + yield cArgs[i] + else: + try: + yield converter( cArgs[i] ) + except Exception as err: + err.args += (converter,) + raise + else: + calculate_cArguments = None + if cWrapper: + return cWrapper( + wrappedOperation, + calculate_pyArgs=calculate_pyArgs, + calculate_cArgs=calculate_cArgs, + calculate_cArguments=calculate_cArguments, + storeValues=storeValues, + returnValues=returnValues, + ) + if pyConverters: + if cConverters: + # create a map of index,converter, callable + if cResolvers: + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... 
+ storeValues( + result, + self, + pyArgs, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return result + return wrapperCall + else: + # null cResolvers + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... 
+ storeValues( + result, + self, + pyArgs, + cArgs, + ) + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = tuple(calculate_cArgs( pyArgs )) + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return result + return wrapperCall + else: + # null cConverters + if cResolvers: + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = pyArgs + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments 
) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = pyArgs + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = pyArgs + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArgs = pyArgs + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return result + return wrapperCall + else: + # null cResolvers + if storeValues: + if returnValues: + 
def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = tuple( calculate_pyArgs( args )) + cArguments = pyArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArguments, + ) + return returnValues( + result, + self, + pyArgs, + cArguments, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArguments = pyArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArguments, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArguments = pyArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArguments, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = tuple( calculate_pyArgs( args )) + cArguments = pyArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = pyArgs + raise err + return result + 
return wrapperCall + else: + # null pyConverters + if cConverters: + if cResolvers: + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... 
+ storeValues( + result, + self, + pyArgs, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... 
+ if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return result + return wrapperCall + else: + # null cResolvers + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... 
+ if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + pyArgs = args + cArgs = [] + for (index,converter) in enumerate( cConverters ): + # move enumerate out... 
+ if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, + ) + raise + cArguments = cArgs + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + return result + return wrapperCall + else: + # null cConverters + if cResolvers: + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + cArgs = args + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = args + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + args, + cArgs, + ) + return returnValues( + result, + self, + args, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + cArgs = args + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = args + raise err + # handle storage of persistent argument values... 
+ storeValues( + result, + self, + args, + cArgs, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + cArgs = args + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = args + raise err + return returnValues( + result, + self, + args, + cArgs, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + cArgs = args + cArguments = tuple(calculate_cArguments( cArgs )) + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = args + raise err + return result + return wrapperCall + else: + # null cResolvers + if storeValues: + if returnValues: + def wrapperCall( *args ): + """Wrapper with all possible operations""" + cArguments = args + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = args + raise err + # handle storage of persistent argument values... + storeValues( + result, + self, + args, + cArguments, + ) + return returnValues( + result, + self, + args, + cArguments, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues""" + cArguments = args + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = args + raise err + # handle storage of persistent argument values... 
+ storeValues( + result, + self, + args, + cArguments, + ) + return result + return wrapperCall + else: # null storeValues + if returnValues: + def wrapperCall( *args ): + """Wrapper with all save storeValues""" + cArguments = args + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = args + raise err + return returnValues( + result, + self, + args, + cArguments, + ) + return wrapperCall + else: + def wrapperCall( *args ): + """Wrapper with all save returnValues and storeValues""" + cArguments = args + try: + result = wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArguments + err.pyArgs = args + raise err + return result + return wrapperCall +# def __call__( self, *args, **named ): +# """Finalise the wrapper before calling it""" +# try: +# return self._finalCall( *args, **named ) +# except AttributeError, err: +# return self.finalise()( *args, **named ) + + def _unspecialised__call__( self, *args ): + """Expand arguments, call the function, store values and check errors""" + pyConverters = getattr( self, 'pyConverters', None ) + if pyConverters: + if len(pyConverters) != len(args): + raise ValueError( + """%s requires %r arguments (%s), received %s: %r"""%( + self.wrappedOperation.__name__, + len(pyConverters), + ", ".join( self.pyConverterNames ), + len(args), + args + ) + ) + pyArgs = [] + for (converter,arg) in zip(pyConverters,args): + if converter is None: + pyArgs.append( arg ) + else: + pyArgs.append( converter(arg, self, args) ) + else: + pyArgs = args + cConverters = getattr( self, 'cConverters', None ) + if cConverters: + cArgs = [] + for (index,converter) in enumerate( cConverters ): + if not hasattr(converter,'__call__'): + cArgs.append( converter ) + else: + try: + cArgs.append( + 
converter( pyArgs, index, self ) + ) + except Exception as err: + if hasattr( err, 'args' ): + err.args += ( + """Failure in cConverter %r"""%(converter), + pyArgs, index, self, + ) + raise + else: + cArgs = pyArgs + cResolvers = getattr( self, 'cResolvers', None ) + if cResolvers: + cArguments = [] + for (converter, value) in zip( cResolvers, cArgs ): + if converter is None: + cArguments.append( value ) + else: + cArguments.append( converter( value ) ) + else: + cArguments = cArgs + try: + result = self.wrappedOperation( *cArguments ) + except ctypes.ArgumentError as err: + err.args = err.args + (cArguments,) + raise err + except error.GLError as err: + err.cArgs = cArgs + err.pyArgs = pyArgs + raise err + storeValues = getattr( self, 'storeValues', None ) + if storeValues is not None: + # handle storage of persistent argument values... + storeValues( + result, + self, + pyArgs, + cArgs, + ) + returnValues = getattr( self, 'returnValues', None ) + if returnValues is not None: + return returnValues( + result, + self, + pyArgs, + cArgs, + ) + else: + return result + +def wrapper( wrappedOperation ): + """Create a Wrapper sub-class instance for the given wrappedOperation + + The purpose of this function is to create a subclass of Wrapper which + has the __doc__ and __name__ of the wrappedOperation so that the instance of + the wrapper will show up as by default, + and will have the docstring available naturally in pydoc and the like. 
+ """ + if isinstance( wrappedOperation, Wrapper ): + return wrappedOperation + dict = { + '__doc__': wrappedOperation.__doc__, + '__slots__': ('wrappedOperation', ), + } + cls = type( wrappedOperation.__name__, (Wrapper,), dict ) + if hasattr( wrappedOperation, '__module__' ): + cls.__module__ = wrappedOperation.__module__ + instance = cls(wrappedOperation) + return instance diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/AUTHORS b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..e35a781665eafa7421c30241962ef8e49588bffc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/AUTHORS @@ -0,0 +1 @@ +Tri Dao, trid@cs.stanford.edu \ No newline at end of file diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5860e4b33f3d9d85fc636137c559331d51783a5b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/METADATA b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..ec083af504fec17fe2bd3bd82f58c6facc76f60a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/METADATA @@ -0,0 +1,541 @@ +Metadata-Version: 2.1 +Name: flash-attn +Version: 2.7.2.post1 +Summary: Flash Attention: Fast and Memory-Efficient Exact Attention +Home-page: https://github.com/Dao-AILab/flash-attention +Author: Tri Dao +Author-email: tri@tridao.me +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: torch +Requires-Dist: einops + +# FlashAttention +This repository provides the official implementation of FlashAttention and +FlashAttention-2 from the +following papers. + +**FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness** +Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, Christopher Ré +Paper: https://arxiv.org/abs/2205.14135 +IEEE Spectrum [article](https://spectrum.ieee.org/mlperf-rankings-2022) about our submission to the MLPerf 2.0 benchmark using FlashAttention. +![FlashAttention](assets/flashattn_banner.jpg) + +**FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning** +Tri Dao + +Paper: https://tridao.me/publications/flash2/flash2.pdf + +![FlashAttention-2](assets/flashattention_logo.png) + + +## Usage + +We've been very happy to see FlashAttention being widely adopted in such a short +time after its release. This [page](https://github.com/Dao-AILab/flash-attention/blob/main/usage.md) +contains a partial list of places where FlashAttention is being used. 
+ +FlashAttention and FlashAttention-2 are free to use and modify (see LICENSE). +Please cite and credit FlashAttention if you use it. + + +## FlashAttention-3 beta release +FlashAttention-3 is optimized for Hopper GPUs (e.g. H100). + +Blogpost: https://tridao.me/blog/2024/flash3/ + +Paper: https://tridao.me/publications/flash3/flash3.pdf + +![FlashAttention-3 speedup on H100 80GB SXM5 with FP16](assets/flash3_fp16_fwd.png) + +This is a beta release for testing / benchmarking before we integrate that with +the rest of the repo. + +Currently released: +- FP16 / BF16 forward and backward, FP8 forward + +Requirements: H100 / H800 GPU, CUDA >= 12.3. + +For now, we highly recommend CUDA 12.3 for best performance. + +To install: +```sh +cd hopper +python setup.py install +``` +To run the test: +```sh +export PYTHONPATH=$PWD +pytest -q -s test_flash_attn.py +``` +Once the package is installed, you can import it as follows: +```python +import flash_attn_interface +flash_attn_interface.flash_attn_func() +``` + +## Installation and features +**Requirements:** +- CUDA toolkit or ROCm toolkit +- PyTorch 1.12 and above. +- `packaging` Python package (`pip install packaging`) +- `ninja` Python package (`pip install ninja`) * +- Linux. Might work for Windows starting v2.3.2 (we've seen a few positive [reports](https://github.com/Dao-AILab/flash-attention/issues/595)) but Windows compilation still requires more testing. If you have ideas on how to set up prebuilt CUDA wheels for Windows, please reach out via Github issue. + +\* Make sure that `ninja` is installed and that it works correctly (e.g. `ninja +--version` then `echo $?` should return exit code 0). If not (sometimes `ninja +--version` then `echo $?` returns a nonzero exit code), uninstall then reinstall +`ninja` (`pip uninstall -y ninja && pip install ninja`). Without `ninja`, +compiling can take a very long time (2h) since it does not use multiple CPU +cores. 
With `ninja` compiling takes 3-5 minutes on a 64-core machine using CUDA toolkit. + +**To install:** +```sh +pip install flash-attn --no-build-isolation +``` +Alternatively you can compile from source: +```sh +python setup.py install +``` + +If your machine has less than 96GB of RAM and lots of CPU cores, `ninja` might +run too many parallel compilation jobs that could exhaust the amount of RAM. To +limit the number of parallel compilation jobs, you can set the environment +variable `MAX_JOBS`: +```sh +MAX_JOBS=4 pip install flash-attn --no-build-isolation +``` + +**Interface:** `src/flash_attention_interface.py` + +### NVIDIA CUDA Support +**Requirements:** +- CUDA 11.7 and above. + +We recommend the +[Pytorch](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) +container from Nvidia, which has all the required tools to install FlashAttention. + +FlashAttention-2 with CUDA currently supports: +1. Ampere, Ada, or Hopper GPUs (e.g., A100, RTX 3090, RTX 4090, H100). Support for Turing + GPUs (T4, RTX 2080) is coming soon, please use FlashAttention 1.x for Turing + GPUs for now. +2. Datatype fp16 and bf16 (bf16 requires Ampere, Ada, or Hopper GPUs). +3. All head dimensions up to 256. ~~Head dim > 192 backward requires A100/A800 or H100/H800~~. Head dim 256 backward now works on consumer GPUs (if there's no dropout) as of flash-attn 2.5.5. + +### AMD ROCm Support +ROCm version has two backends. There is [composable_kernel](https://github.com/ROCm/composable_kernel) (ck) which is the default backend and a [Triton](https://github.com/triton-lang/triton) backend. They provide an implementation of FlashAttention-2. + +**Requirements:** +- ROCm 6.0 and above. + +We recommend the +[Pytorch](https://hub.docker.com/r/rocm/pytorch) +container from ROCm, which has all the required tools to install FlashAttention. + +#### Composable Kernel Backend +FlashAttention-2 ROCm CK backend currently supports: +1. MI200 or MI300 GPUs. +2. Datatype fp16 and bf16 +3. 
Forward's head dimensions up to 256. Backward head dimensions up to 128. + +#### Triton Backend +The Triton implementation of the [Flash Attention v2](https://tridao.me/publications/flash2/flash2.pdf) is currently a work in progress. + +It supports AMD's CDNA (MI200, MI300) and RDNA GPU's using fp16, bf16 and fp32 datatypes. + +These features are supported in Fwd and Bwd +1) Fwd and Bwd with causal masking +2) Variable sequence lengths +3) Arbitrary Q and KV sequence lengths +4) Arbitrary head sizes + +These features are supported in Fwd for now. We will add them to backward soon. +1) Multi and grouped query attention +2) ALiBi and matrix bias + +These features are in development +1) Paged Attention +2) Sliding Window +3) Rotary embeddings +4) Dropout +5) Performance Improvements + +#### Getting Started +To get started with the triton backend for AMD, follow the steps below. + +First install the recommended Triton [commit](https://github.com/triton-lang/triton/commit/3ca2f498e98ed7249b82722587c511a5610e00c4). + +``` +git clone https://github.com/triton-lang/triton +cd triton +git checkout 3ca2f498e98ed7249b82722587c511a5610e00c4 +pip install --verbose -e python +``` +Then install and test Flash Attention with the flag `FLASH_ATTENTION_TRITON_AMD_ENABLE` set to `"TRUE"`. 
+ +``` +export FLASH_ATTENTION_TRITON_AMD_ENABLE="TRUE" +cd flash-attention +python setup.py install +pytest tests/test_flash_attn.py +``` + + +## How to use FlashAttention + +The main functions implement scaled dot product attention (softmax(Q @ K^T * +softmax_scale) @ V): +```python +from flash_attn import flash_attn_qkvpacked_func, flash_attn_func +``` + +```python +flash_attn_qkvpacked_func(qkv, dropout_p=0.0, softmax_scale=None, causal=False, + window_size=(-1, -1), alibi_slopes=None, deterministic=False): +"""dropout_p should be set to 0.0 during evaluation +If Q, K, V are already stacked into 1 tensor, this function will be faster than +calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation +of the gradients of Q, K, V. +If window_size != (-1, -1), implements sliding window local attention. Query at position i +will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive. +Arguments: + qkv: (batch_size, seqlen, 3, nheads, headdim) + dropout_p: float. Dropout probability. + softmax_scale: float. The scaling of QK^T before applying softmax. + Default to 1 / sqrt(headdim). + causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). + window_size: (left, right). If not (-1, -1), implements sliding window local attention. + alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to + the attention score of query i and key j. + deterministic: bool. Whether to use the deterministic implementation of the backward pass, + which is slightly slower and uses more memory. The forward pass is always deterministic. +Return: + out: (batch_size, seqlen, nheads, headdim). 
+""" +``` + +```python +flash_attn_func(q, k, v, dropout_p=0.0, softmax_scale=None, causal=False, + window_size=(-1, -1), alibi_slopes=None, deterministic=False): +"""dropout_p should be set to 0.0 during evaluation +Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads +than Q. Note that the number of heads in Q must be divisible by the number of heads in KV. +For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head +0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V. +If window_size != (-1, -1), implements sliding window local attention. Query at position i +will only attend to keys between +[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive. + +Arguments: + q: (batch_size, seqlen, nheads, headdim) + k: (batch_size, seqlen, nheads_k, headdim) + v: (batch_size, seqlen, nheads_k, headdim) + dropout_p: float. Dropout probability. + softmax_scale: float. The scaling of QK^T before applying softmax. + Default to 1 / sqrt(headdim). + causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). + window_size: (left, right). If not (-1, -1), implements sliding window local attention. + alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of + (-alibi_slope * |i + seqlen_k - seqlen_q - j|) + is added to the attention score of query i and key j. + deterministic: bool. Whether to use the deterministic implementation of the backward pass, + which is slightly slower and uses more memory. The forward pass is always deterministic. +Return: + out: (batch_size, seqlen, nheads, headdim). 
+""" +``` + +```python +def flash_attn_with_kvcache( + q, + k_cache, + v_cache, + k=None, + v=None, + rotary_cos=None, + rotary_sin=None, + cache_seqlens: Optional[Union[(int, torch.Tensor)]] = None, + cache_batch_idx: Optional[torch.Tensor] = None, + block_table: Optional[torch.Tensor] = None, + softmax_scale=None, + causal=False, + window_size=(-1, -1), # -1 means infinite context window + rotary_interleaved=True, + alibi_slopes=None, +): + """ + If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from + k and v. This is useful for incremental decoding: you can pass in the cached keys/values from + the previous step, and update them with the new keys/values from the current step, and do + attention with the updated cache, all in 1 kernel. + + If you pass in k / v, you must make sure that the cache is large enough to hold the new values. + For example, the KV cache could be pre-allocated with the max sequence length, and you can use + cache_seqlens to keep track of the current sequence lengths of each sequence in the batch. + + Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be + rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc. + If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos + and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc. + If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at + indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens). + + See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function. + + Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads + than Q. Note that the number of heads in Q must be divisible by the number of heads in KV. 
+ For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head + 0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V. + + If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix. + For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is: + 1 1 1 1 0 + 1 1 1 1 1 + If seqlen_q = 5 and seqlen_k = 2, the causal mask is: + 0 0 + 0 0 + 0 0 + 1 0 + 1 1 + If the row of the mask is all zero, the output will be zero. + + If window_size != (-1, -1), implements sliding window local attention. Query at position i + will only attend to keys between + [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive. + + Note: Does not support backward pass. + + Arguments: + q: (batch_size, seqlen, nheads, headdim) + k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table, + or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache) + page_block_size must be a multiple of 256. + v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table, + or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache) + k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate + k with k_cache, starting at the indices specified by cache_seqlens. + v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k. + rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding + to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16. + rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos. + cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the + KV cache. + block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32. 
+ cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache. + If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1]. + If the indices are not distinct, and k and v are provided, the values updated in the cache + might come from any of the duplicate indices. + softmax_scale: float. The scaling of QK^T before applying softmax. + Default to 1 / sqrt(headdim). + causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). + window_size: (left, right). If not (-1, -1), implements sliding window local attention. + rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in. + If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False, + rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1 + (i.e. GPT-NeoX style). + alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of + (-alibi_slope * |i + seqlen_k - seqlen_q - j|) + is added to the attention score of query i and key j. + + Return: + out: (batch_size, seqlen, nheads, headdim). + """ +``` + +To see how these functions are used in a multi-head attention layer (which +includes QKV projection, output projection), see the MHA [implementation](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py). 
+ +## Changelog + +### 2.0: Complete rewrite, 2x faster +Upgrading from FlashAttention (1.x) to FlashAttention-2 + +These functions have been renamed: +- `flash_attn_unpadded_func` -> `flash_attn_varlen_func` +- `flash_attn_unpadded_qkvpacked_func` -> `flash_attn_varlen_qkvpacked_func` +- `flash_attn_unpadded_kvpacked_func` -> `flash_attn_varlen_kvpacked_func` + +If the inputs have the same sequence lengths in the same batch, it is simpler +and faster to use these functions: +```python +flash_attn_qkvpacked_func(qkv, dropout_p=0.0, softmax_scale=None, causal=False) +``` +```python +flash_attn_func(q, k, v, dropout_p=0.0, softmax_scale=None, causal=False) +``` +### 2.1: Change behavior of causal flag + +If seqlen_q != seqlen_k and causal=True, the causal mask is aligned to the +bottom right corner of the attention matrix, instead of the top-left corner. + +For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = +masked out) is: +v2.0: + 1 0 0 0 0 + 1 1 0 0 0 +v2.1: + 1 1 1 1 0 + 1 1 1 1 1 + +If seqlen_q = 5 and seqlen_k = 2, the causal mask is: +v2.0: + 1 0 + 1 1 + 1 1 + 1 1 + 1 1 +v2.1: + 0 0 + 0 0 + 0 0 + 1 0 + 1 1 +If the row of the mask is all zero, the output will be zero. + +### 2.2: Optimize for inference + +Optimize for inference (iterative decoding) when query has very small sequence +length (e.g., query sequence length = 1). The bottleneck here is to load KV +cache as fast as possible, and we split the loading across different thread +blocks, with a separate kernel to combine results. + +See the function `flash_attn_with_kvcache` with more features for inference +(perform rotary embedding, updating KV cache inplace). + +Thanks to the xformers team, and in particular Daniel Haziza, for this +collaboration. + +### 2.3: Local (i.e., sliding window) attention + +Implement sliding window attention (i.e., local attention). Thanks to [Mistral +AI](https://mistral.ai/) and in particular Timothée Lacroix for this +contribution. 
Sliding window was used in the [Mistral 7B](https://mistral.ai/news/announcing-mistral-7b/) model. + +### 2.4: ALiBi (attention with linear bias), deterministic backward pass. + +Implement ALiBi (Press et al., 2021). Thanks to Sanghun Cho from Kakao Brain for this contribution. + +Implement deterministic backward pass. Thanks to engineers from [Meituan](www.meituan.com) for this contribution. + +### 2.5: Paged KV cache. + +Support paged KV cache (i.e., [PagedAttention](https://arxiv.org/abs/2309.06180)). +Thanks to @beginlner for this contribution. + +### 2.6: Softcapping. + +Support attention with softcapping, as used in Gemma-2 and Grok models. +Thanks to @Narsil and @lucidrains for this contribution. + +### 2.7: Compatibility with torch compile + +Thanks to @ani300 for this contribution. + +## Performance + +We present expected speedup (combined forward + backward pass) and memory savings from using FlashAttention against PyTorch standard attention, depending on sequence length, on different GPUs (speedup depends on memory bandwidth - we see more speedup on slower GPU memory). + +We currently have benchmarks for these GPUs: +* [A100](#a100) +* [H100](#h100) + + + +### A100 + +We display FlashAttention speedup using these parameters: +* Head dimension 64 or 128, hidden dimension 2048 (i.e. either 32 or 16 heads). +* Sequence length 512, 1k, 2k, 4k, 8k, 16k. +* Batch size set to 16k / seqlen. + +#### Speedup + +![FlashAttention speedup on A100 80GB SXM5 with FP16/BF16](assets/flash2_a100_fwd_bwd_benchmark.png) + +#### Memory + +![FlashAttention memory](assets/flashattn_memory.jpg) + +We show memory savings in this graph (note that memory footprint is the same no matter if you use dropout or masking). +Memory savings are proportional to sequence length -- since standard attention has memory quadratic in sequence length, whereas FlashAttention has memory linear in sequence length. +We see 10X memory savings at sequence length 2K, and 20X at 4K. 
+As a result, FlashAttention can scale to much longer sequence lengths. + +### H100 + +![FlashAttention speedup on H100 SXM5 with FP16/BF16](assets/flash2_h100_fwd_bwd_benchmark.png) + +## Full model code and training script + +We have released the full GPT model +[implementation](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/models/gpt.py). +We also provide optimized implementations of other layers (e.g., MLP, LayerNorm, +cross-entropy loss, rotary embedding). Overall this speeds up training by 3-5x +compared to the baseline implementation from Huggingface, reaching up to 225 +TFLOPs/sec per A100, equivalent to 72% model FLOPs utilization (we don't need +any activation checkpointing). + +We also include a training +[script](https://github.com/Dao-AILab/flash-attention/tree/main/training) to +train GPT2 on Openwebtext and GPT3 on The Pile. + +## Triton implementation of FlashAttention + +Phil Tillet (OpenAI) has an experimental implementation of FlashAttention in Triton: +https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py + +As Triton is a higher-level language than CUDA, it might be easier to understand +and experiment with. The notations in the Triton implementation are also closer +to what's used in our paper. + +We also have an experimental implementation in Triton that support attention +bias (e.g. ALiBi): +https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/flash_attn_triton.py + + +## Tests +We test that FlashAttention produces the same output and gradient as a reference +implementation, up to some numerical tolerance. In particular, we check that the +maximum numerical error of FlashAttention is at most twice the numerical error +of a baseline implementation in Pytorch (for different head dimensions, input +dtype, sequence length, causal / non-causal). 
+ +To run the tests: +```sh +pytest -q -s tests/test_flash_attn.py +``` +## When you encounter issues + +This new release of FlashAttention-2 has been tested on several GPT-style +models, mostly on A100 GPUs. + +If you encounter bugs, please open a GitHub Issue! + +## Tests +To run the tests: +```sh +pytest tests/test_flash_attn_ck.py +``` + +## Citation +If you use this codebase, or otherwise found our work valuable, please cite: +``` +@inproceedings{dao2022flashattention, + title={Flash{A}ttention: Fast and Memory-Efficient Exact Attention with {IO}-Awareness}, + author={Dao, Tri and Fu, Daniel Y. and Ermon, Stefano and Rudra, Atri and R{\'e}, Christopher}, + booktitle={Advances in Neural Information Processing Systems (NeurIPS)}, + year={2022} +} +@inproceedings{dao2023flashattention2, + title={Flash{A}ttention-2: Faster Attention with Better Parallelism and Work Partitioning}, + author={Dao, Tri}, + booktitle={International Conference on Learning Representations (ICLR)}, + year={2024} +} +``` diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/RECORD b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..640a792b4a1791f579c68bdeef9c34a2d9e7f0db --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/RECORD @@ -0,0 +1,143 @@ +flash_attn-2.7.2.post1.dist-info/AUTHORS,sha256=879BRIJqYoQbf5rrxQV_ddotMqZSpXPtxnJQ7JSjd6c,29 +flash_attn-2.7.2.post1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +flash_attn-2.7.2.post1.dist-info/LICENSE,sha256=jJzLlsBl5wYTW2y60nm3IdphVuUfOl8nxrMymvlBbXM,1558 +flash_attn-2.7.2.post1.dist-info/METADATA,sha256=gmnKU-9kdt71gMKyhmFCI7dEkiV2A-9fzuKS57ak8TQ,22437 +flash_attn-2.7.2.post1.dist-info/RECORD,, +flash_attn-2.7.2.post1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+flash_attn-2.7.2.post1.dist-info/WHEEL,sha256=ajFZpXEWjoF3CE-pJ2B52cATZBlVc3sJLvDIL5I6Tak,105 +flash_attn-2.7.2.post1.dist-info/top_level.txt,sha256=CAfUeAM8RDLF5nDjghmaJPNA5bOIUbYHfMXK0rvVjcw,36 +flash_attn/__init__.py,sha256=33Vo6R_5k7y4iJR3Bk8Op3Uzxa0dZc4OdchOtdzzJSE,291 +flash_attn/__pycache__/__init__.cpython-310.pyc,, +flash_attn/__pycache__/bert_padding.cpython-310.pyc,, +flash_attn/__pycache__/flash_attn_interface.cpython-310.pyc,, +flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc,, +flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc,, +flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc,, +flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc,, +flash_attn/__pycache__/fused_softmax.cpython-310.pyc,, +flash_attn/bert_padding.py,sha256=gF1EmsdJ-HpQ86MRQ4VxDw-Sb_RVISdQALdNnoByHlw,9930 +flash_attn/flash_attn_interface.py,sha256=dHIPTJx9uYVyWSkLdUcYu6KxgbnHsP6qkgwbXqJW9jo,59398 +flash_attn/flash_attn_triton.py,sha256=Du81zbh8Ls70ExEsm00opziGvjGFfcZCoZDUO2zut9Q,41112 +flash_attn/flash_attn_triton_amd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/flash_attn_triton_amd/__pycache__/__init__.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/bench.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/bwd_prefill.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/bwd_ref.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/fwd_decode.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/fwd_prefill.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/fwd_ref.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/interface_fa.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/interface_torch.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/test.cpython-310.pyc,, +flash_attn/flash_attn_triton_amd/__pycache__/utils.cpython-310.pyc,, 
+flash_attn/flash_attn_triton_amd/bench.py,sha256=wIGZHcYI_Ria2BB6FBJ87GKRxzboRP1LOf2L__aPeA4,9837 +flash_attn/flash_attn_triton_amd/bwd_prefill.py,sha256=iCe1jLP5_osxzEGyMBpLUW9koGezp91A5tsI0o6OQQM,20269 +flash_attn/flash_attn_triton_amd/bwd_ref.py,sha256=BL2_4jYjRvUodX4GjgpvyQhVDgm2a6OcnVZJnozEz_A,9972 +flash_attn/flash_attn_triton_amd/fwd_decode.py,sha256=vrk6GQqo9Tp8-SWSUzREWftqyokI5BQlTA_AAdmbEYA,23435 +flash_attn/flash_attn_triton_amd/fwd_prefill.py,sha256=ynZUspV95iXQ0Ccld3jQ0ZxGuZHWrLWwysDL4FS71R0,32986 +flash_attn/flash_attn_triton_amd/fwd_ref.py,sha256=ORf0qC92d3FFqPe0l6VA0gMjc2bVlYwUwF-g4Ilq2ko,11362 +flash_attn/flash_attn_triton_amd/interface_fa.py,sha256=7NTNBEsr3RQfylD-0IrQXN_ECryEtR7BlvJFVPz-HDM,16292 +flash_attn/flash_attn_triton_amd/interface_torch.py,sha256=DU_iepQ4h5FFvYxM4qDR5eh76l55iUiCLXdGBsc6KYo,3308 +flash_attn/flash_attn_triton_amd/test.py,sha256=7jN9tQIRNJ_xpfPHrERU9aeG70OewtQKIYqyRFxed7o,30832 +flash_attn/flash_attn_triton_amd/utils.py,sha256=XoKPb0Zzrjbbn7kX_j3YIqqhDW0xIhX4EjzqYvwPmng,12247 +flash_attn/flash_attn_triton_og.py,sha256=LmvDju7LJG-wOYhoR6Zc2AmdPK2oWyB1VJpMjRhnWnE,11328 +flash_attn/flash_blocksparse_attention.py,sha256=gsdH9VtYaVcTcP1rzZYPy1V_wUqgdvVcsB1h4Mk7RGs,7472 +flash_attn/flash_blocksparse_attn_interface.py,sha256=2qK2KvVCt851_j8ZzHvjS-aMfdgVDu1yne67-iScWfo,7265 +flash_attn/fused_softmax.py,sha256=0-XbXo7R1a5h4-EpUzPy--lwlGytfTDW34WGM5nmBAY,7793 +flash_attn/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/layers/__pycache__/__init__.cpython-310.pyc,, +flash_attn/layers/__pycache__/patch_embed.cpython-310.pyc,, +flash_attn/layers/__pycache__/rotary.cpython-310.pyc,, +flash_attn/layers/patch_embed.py,sha256=H58CgME_qSOPTZLOG08wFgrQS1j34pvNwMPrkTj3Ek4,2136 +flash_attn/layers/rotary.py,sha256=MqsUZ-Gxa0OcYLtL8OsjHIOkqyTacQHkMGpqADa2e6Q,21239 +flash_attn/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/losses/__pycache__/__init__.cpython-310.pyc,, 
+flash_attn/losses/__pycache__/cross_entropy.cpython-310.pyc,, +flash_attn/losses/cross_entropy.py,sha256=tj5IoeUZuSzA1_82UFr7o-1WuoHyKAc1gVS6fWzAbDQ,3197 +flash_attn/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/models/__pycache__/__init__.cpython-310.pyc,, +flash_attn/models/__pycache__/baichuan.cpython-310.pyc,, +flash_attn/models/__pycache__/bert.cpython-310.pyc,, +flash_attn/models/__pycache__/bigcode.cpython-310.pyc,, +flash_attn/models/__pycache__/btlm.cpython-310.pyc,, +flash_attn/models/__pycache__/falcon.cpython-310.pyc,, +flash_attn/models/__pycache__/gpt.cpython-310.pyc,, +flash_attn/models/__pycache__/gpt_neox.cpython-310.pyc,, +flash_attn/models/__pycache__/gptj.cpython-310.pyc,, +flash_attn/models/__pycache__/llama.cpython-310.pyc,, +flash_attn/models/__pycache__/opt.cpython-310.pyc,, +flash_attn/models/__pycache__/vit.cpython-310.pyc,, +flash_attn/models/baichuan.py,sha256=eFNWwoRQ02AIeQP0OoK8pNvYw0dqnHOshLigCQPkAEc,5730 +flash_attn/models/bert.py,sha256=dMM6-Pj814pgQdsKkgkwg_grNZ7snM2juSgoUB14R7Q,33232 +flash_attn/models/bigcode.py,sha256=mkYeItoJtmWVf2wKkUs5oXjwdbTdGSo5eHxi0-1maZ8,9383 +flash_attn/models/btlm.py,sha256=d8YDjYTa2G1DutYu-YuVf15S_Dn6oKn8-HzERoersLA,4631 +flash_attn/models/falcon.py,sha256=mA3wGv1a4zhbrUSlFNVVmTgVjiXc1sFTOi55eYpgSPo,6033 +flash_attn/models/gpt.py,sha256=QGBMCw_osxD4VMMj1uC6TMlXlM5lIInxSUKmq5J5kSU,47669 +flash_attn/models/gpt_neox.py,sha256=_704a9KQ2PcnID8uMV7yZ4ggjGlh1zZH5gszue6D1bI,5159 +flash_attn/models/gptj.py,sha256=k2eqMNyMbU7CJVM_BHBjlKt0ByFz6ITSETqS1mJa89g,4436 +flash_attn/models/llama.py,sha256=bDRI308iRpeJngZLrQlLTGYAmwYotqzUxnjBMirfn-k,16581 +flash_attn/models/opt.py,sha256=L0ZIWKpSP44lcEbiVCzVT9un_5gFMAW6cvnS3KHcb-A,5164 +flash_attn/models/vit.py,sha256=7i0WUI_jZvQ5TMoSKPPzf77ZcyMDfDJuQaINzXN_iQU,14074 +flash_attn/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/modules/__pycache__/__init__.cpython-310.pyc,, 
+flash_attn/modules/__pycache__/block.cpython-310.pyc,, +flash_attn/modules/__pycache__/embedding.cpython-310.pyc,, +flash_attn/modules/__pycache__/mha.cpython-310.pyc,, +flash_attn/modules/__pycache__/mlp.cpython-310.pyc,, +flash_attn/modules/block.py,sha256=WLi7JKj9_Zpk89ppzC7WTIoykJJ7TLOJbUSZePNnW1E,17349 +flash_attn/modules/embedding.py,sha256=RCVeeiomlGNkLeQD8G6Udvex-NDI_xKD45hXjgZ2lbQ,8693 +flash_attn/modules/mha.py,sha256=V6Ynog9pb_G9UVxetRjXlmWGExZlxmJkYVwAExXqUEk,43297 +flash_attn/modules/mlp.py,sha256=G6KPQagfKq1DRn7hQRJ3OHznFJLZHj_PiidZE_zcLgg,6033 +flash_attn/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/ops/__pycache__/__init__.cpython-310.pyc,, +flash_attn/ops/__pycache__/activations.cpython-310.pyc,, +flash_attn/ops/__pycache__/fused_dense.cpython-310.pyc,, +flash_attn/ops/__pycache__/layer_norm.cpython-310.pyc,, +flash_attn/ops/__pycache__/rms_norm.cpython-310.pyc,, +flash_attn/ops/activations.py,sha256=t5lzNg1In8LP6bKeTnyeMizwqjv27JGbJ6ylPdGvZYg,3939 +flash_attn/ops/fused_dense.py,sha256=ACJKqkIfxZibxI3nb5ycb3pXBKaL_CM63rUUyQYNAUE,27907 +flash_attn/ops/layer_norm.py,sha256=zr7NXIm-2mtEynTp1CS0fbFGI2Mqdp41dY4AfDWF6EQ,22443 +flash_attn/ops/rms_norm.py,sha256=XEnihcj0a4aSz4LO55m5iKGVn4HKTeKN8TIyHjuDgxI,3988 +flash_attn/ops/triton/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +flash_attn/ops/triton/__pycache__/__init__.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/cross_entropy.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/k_activations.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/layer_norm.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/linear.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/mlp.cpython-310.pyc,, +flash_attn/ops/triton/__pycache__/rotary.cpython-310.pyc,, +flash_attn/ops/triton/cross_entropy.py,sha256=hjSfLhv4cKt-N8hTfpgkGMFdxhs8B4II6VIkEtck8EM,12845 +flash_attn/ops/triton/k_activations.py,sha256=-Z3vIyO4JkqBMipKsPvhzmxljtBdIhJCsl_M-_ESqBo,4034 
+flash_attn/ops/triton/layer_norm.py,sha256=rNJwuijsZ6sKDtKHlkbT0qDzbi6vetVjjibpy9YRHFQ,35715 +flash_attn/ops/triton/linear.py,sha256=OtRvKz8xdpl-7v3q_ZTaS9fdBt9XrzMyapgRr50uBbM,20841 +flash_attn/ops/triton/mlp.py,sha256=_5lbZJFZg_pXeXYITGt4V_6LkB_yddClB_jt-diCOdw,6068 +flash_attn/ops/triton/rotary.py,sha256=WH7tELBLZ23znuxnYUAzP7YWqwMXJmRgUQ8B64Vjdn4,8583 +flash_attn/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +flash_attn/utils/__pycache__/__init__.cpython-310.pyc,, +flash_attn/utils/__pycache__/benchmark.cpython-310.pyc,, +flash_attn/utils/__pycache__/distributed.cpython-310.pyc,, +flash_attn/utils/__pycache__/generation.cpython-310.pyc,, +flash_attn/utils/__pycache__/pretrained.cpython-310.pyc,, +flash_attn/utils/benchmark.py,sha256=JDtzdVhFyMIQqs3edbcXdXnmDf-O7RVpmZmn2ZFCvI0,7369 +flash_attn/utils/distributed.py,sha256=qhcybRXtslssuV9LYaQy37haPaPtklM4YUMDx9UvnnQ,5825 +flash_attn/utils/generation.py,sha256=9IVPvkf_hlbsxCWgECUA03293qHxXzWDFCTAOdqbAVo,30694 +flash_attn/utils/pretrained.py,sha256=VZ6qk90sBJA7M86gRzPsNc_CkQXkj5HyrJvwl0I355k,3246 +flash_attn_2_cuda.cpython-310-x86_64-linux-gnu.so,sha256=GWNPhfEcA1EY7BBJNjK52IwMRfiXtD2qE04qx0K_4_M,602741208 +hopper/__init__.py,sha256=tFIhpQJfQnBMRzNaynKKbeNwvSPrBsV5DFEN9zDcGqc,25 +hopper/__pycache__/__init__.cpython-310.pyc,, +hopper/__pycache__/benchmark_attn.cpython-310.pyc,, +hopper/__pycache__/benchmark_flash_attention_fp8.cpython-310.pyc,, +hopper/__pycache__/benchmark_split_kv.cpython-310.pyc,, +hopper/__pycache__/flash_attn_interface.cpython-310.pyc,, +hopper/__pycache__/setup.cpython-310.pyc,, +hopper/__pycache__/test_attn_kvcache.cpython-310.pyc,, +hopper/__pycache__/test_flash_attn.cpython-310.pyc,, +hopper/__pycache__/test_kvcache.cpython-310.pyc,, +hopper/benchmark_attn.py,sha256=6Jsmkmc9QUKtD8bTYQELspvUvz0yZ5G9YbUioC69bsg,15053 +hopper/benchmark_flash_attention_fp8.py,sha256=wqYWaxowvd0s1IoAWMZEpyjt7Rtz9Juj13geXP-ud9E,13273 
+hopper/benchmark_split_kv.py,sha256=A8boLlJuhCDhRxz3leVaBEwjIgxGeJAqJYoGp4mpdR4,13183 +hopper/flash_attn_interface.py,sha256=OG8naR5sR0r6Yk-9jtu-0G7eJMngl-cCy3ize5wiVsQ,23117 +hopper/setup.py,sha256=uUpsYbAZsfLMm3vZMe06eDYgW_O8JoheJz7OIXpij2o,13711 +hopper/test_attn_kvcache.py,sha256=IDeHeyWpnb659RZgnY3seo1hZBLmrouNSdUFLgeeOMA,19539 +hopper/test_flash_attn.py,sha256=ggRSzGhMg3B8uJ_nGrx6__P0ELSZpzLRWBbHAtJ2k6c,37128 +hopper/test_kvcache.py,sha256=R7sk9mdth2R-7BUstzPk9N4BI9cS2JNb1oJgF-uxGXQ,7612 diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d277b161656396d81ebf2f7aeb6a42d499cc0b6e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: false +Tag: cp310-cp310-linux_x86_64 + diff --git a/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/top_level.txt b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..f96246cc319a2b0232155dc96f7e6e1e3b7622f4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/flash_attn-2.7.2.post1.dist-info/top_level.txt @@ -0,0 +1,3 @@ +flash_attn +flash_attn_2_cuda +hopper diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..82339811d1e202be80266b5ed52ab231732387fb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc b/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08b3ee5c355e10dd7995e4d596645269bd5f51e4 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75c61da03795af0d4f60cd4d4a8b8e0dd45e3d5e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.plotting._matplotlib.boxplot import ( + BoxPlot, + boxplot, + boxplot_frame, + boxplot_frame_groupby, +) +from pandas.plotting._matplotlib.converter import ( + deregister, + register, +) +from pandas.plotting._matplotlib.core import ( + AreaPlot, + BarhPlot, + BarPlot, + HexBinPlot, + LinePlot, + PiePlot, + ScatterPlot, +) +from pandas.plotting._matplotlib.hist import ( + HistPlot, + KdePlot, + hist_frame, + hist_series, +) +from pandas.plotting._matplotlib.misc import ( + andrews_curves, + autocorrelation_plot, + bootstrap_plot, + lag_plot, + parallel_coordinates, + radviz, + scatter_matrix, +) +from pandas.plotting._matplotlib.tools import table + +if TYPE_CHECKING: + from pandas.plotting._matplotlib.core import MPLPlot + +PLOT_CLASSES: dict[str, type[MPLPlot]] = { + "line": LinePlot, + "bar": BarPlot, + "barh": BarhPlot, + "box": BoxPlot, + "hist": HistPlot, + "kde": KdePlot, + "area": 
AreaPlot, + "pie": PiePlot, + "scatter": ScatterPlot, + "hexbin": HexBinPlot, +} + + +def plot(data, kind, **kwargs): + # Importing pyplot at the top of the file (before the converters are + # registered) causes problems in matplotlib 2 (converters seem to not + # work) + import matplotlib.pyplot as plt + + if kwargs.pop("reuse_plot", False): + ax = kwargs.get("ax") + if ax is None and len(plt.get_fignums()) > 0: + with plt.rc_context(): + ax = plt.gca() + kwargs["ax"] = getattr(ax, "left_ax", ax) + plot_obj = PLOT_CLASSES[kind](data, **kwargs) + plot_obj.generate() + plot_obj.draw() + return plot_obj.result + + +__all__ = [ + "plot", + "hist_series", + "hist_frame", + "boxplot", + "boxplot_frame", + "boxplot_frame_groupby", + "table", + "andrews_curves", + "autocorrelation_plot", + "bootstrap_plot", + "lag_plot", + "parallel_coordinates", + "radviz", + "scatter_matrix", + "register", + "deregister", +] diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0679a604a1df1cada89e9c78cba4003667c0c1c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bfec7df2fdc8b531dd0c27d52e8212e54b9cb3f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc 
b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..404dc7bb47e9227eea4359d59cc0933729621d37 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b76decaa75d3e7adc9792763db4e276e514ff1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py @@ -0,0 +1,572 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, + NamedTuple, +) +import warnings + +from matplotlib.artist import setp +import numpy as np + +from pandas._libs import lib +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_dict_like +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import remove_na_arraylike + +import pandas as pd +import pandas.core.common as com + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.core import ( + LinePlot, + MPLPlot, +) +from pandas.plotting._matplotlib.groupby import create_iter_data_given_by +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + maybe_adjust_figure, +) + +if TYPE_CHECKING: + from collections.abc import Collection + + from matplotlib.axes import Axes + from matplotlib.figure import Figure + from matplotlib.lines import Line2D + + from pandas._typing import MatplotlibColor + + +def _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None: + """Set the tick labels of a 
given axis. + + Due to https://github.com/matplotlib/matplotlib/pull/17266, we need to handle the + case of repeated ticks (due to `FixedLocator`) and thus we duplicate the number of + labels. + """ + ticks = ax.get_xticks() if is_vertical else ax.get_yticks() + if len(ticks) != len(labels): + i, remainder = divmod(len(ticks), len(labels)) + assert remainder == 0, remainder + labels *= i + if is_vertical: + ax.set_xticklabels(labels, **kwargs) + else: + ax.set_yticklabels(labels, **kwargs) + + +class BoxPlot(LinePlot): + @property + def _kind(self) -> Literal["box"]: + return "box" + + _layout_type = "horizontal" + + _valid_return_types = (None, "axes", "dict", "both") + + class BP(NamedTuple): + # namedtuple to hold results + ax: Axes + lines: dict[str, list[Line2D]] + + def __init__(self, data, return_type: str = "axes", **kwargs) -> None: + if return_type not in self._valid_return_types: + raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}") + + self.return_type = return_type + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + + if self.subplots: + # Disable label ax sharing. 
Otherwise, all subplots shows last + # column label + if self.orientation == "vertical": + self.sharex = False + else: + self.sharey = False + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, ax: Axes, y: np.ndarray, column_num=None, return_type: str = "axes", **kwds + ): + ys: np.ndarray | list[np.ndarray] + if y.ndim == 2: + ys = [remove_na_arraylike(v) for v in y] + # Boxplot fails with empty arrays, so need to add a NaN + # if any cols are empty + # GH 8181 + ys = [v if v.size > 0 else np.array([np.nan]) for v in ys] + else: + ys = remove_na_arraylike(y) + bp = ax.boxplot(ys, **kwds) + + if return_type == "dict": + return bp, bp + elif return_type == "both": + return cls.BP(ax=ax, lines=bp), bp + else: + return ax, bp + + def _validate_color_args(self, color, colormap): + if color is lib.no_default: + return None + + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used " + "simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + + if isinstance(color, dict): + valid_keys = ["boxes", "whiskers", "medians", "caps"] + for key in color: + if key not in valid_keys: + raise ValueError( + f"color dict contains invalid key '{key}'. 
" + f"The key must be either {valid_keys}" + ) + return color + + @cache_readonly + def _color_attrs(self): + # get standard colors for default + # use 2 colors by default, for box/whisker and median + # flier colors isn't needed here + # because it can be specified by ``sym`` kw + return get_standard_colors(num_colors=3, colormap=self.colormap, color=None) + + @cache_readonly + def _boxes_c(self): + return self._color_attrs[0] + + @cache_readonly + def _whiskers_c(self): + return self._color_attrs[0] + + @cache_readonly + def _medians_c(self): + return self._color_attrs[2] + + @cache_readonly + def _caps_c(self): + return self._color_attrs[0] + + def _get_colors( + self, + num_colors=None, + color_kwds: dict[str, MatplotlibColor] + | MatplotlibColor + | Collection[MatplotlibColor] + | None = "color", + ) -> None: + pass + + def maybe_color_bp(self, bp) -> None: + if isinstance(self.color, dict): + boxes = self.color.get("boxes", self._boxes_c) + whiskers = self.color.get("whiskers", self._whiskers_c) + medians = self.color.get("medians", self._medians_c) + caps = self.color.get("caps", self._caps_c) + else: + # Other types are forwarded to matplotlib + # If None, use default colors + boxes = self.color or self._boxes_c + whiskers = self.color or self._whiskers_c + medians = self.color or self._medians_c + caps = self.color or self._caps_c + + color_tup = (boxes, whiskers, medians, caps) + maybe_color_bp(bp, color_tup=color_tup, **self.kwds) + + def _make_plot(self, fig: Figure) -> None: + if self.subplots: + self._return_obj = pd.Series(dtype=object) + + # Re-create iterated data if `by` is assigned by users + data = ( + create_iter_data_given_by(self.data, self._kind) + if self.by is not None + else self.data + ) + + # error: Argument "data" to "_iter_data" of "MPLPlot" has + # incompatible type "object"; expected "DataFrame | + # dict[Hashable, Series | DataFrame]" + for i, (label, y) in enumerate(self._iter_data(data=data)): # type: ignore[arg-type] + ax = 
self._get_ax(i) + kwds = self.kwds.copy() + + # When by is applied, show title for subplots to know which group it is + # just like df.boxplot, and need to apply T on y to provide right input + if self.by is not None: + y = y.T + ax.set_title(pprint_thing(label)) + + # When `by` is assigned, the ticklabels will become unique grouped + # values, instead of label which is used as subtitle in this case. + # error: "Index" has no attribute "levels"; maybe "nlevels"? + levels = self.data.columns.levels # type: ignore[attr-defined] + ticklabels = [pprint_thing(col) for col in levels[0]] + else: + ticklabels = [pprint_thing(label)] + + ret, bp = self._plot( + ax, y, column_num=i, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj[label] = ret + _set_ticklabels( + ax=ax, labels=ticklabels, is_vertical=self.orientation == "vertical" + ) + else: + y = self.data.values.T + ax = self._get_ax(0) + kwds = self.kwds.copy() + + ret, bp = self._plot( + ax, y, column_num=0, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj = ret + + labels = [pprint_thing(left) for left in self.data.columns] + if not self.use_index: + labels = [pprint_thing(key) for key in range(len(labels))] + _set_ticklabels( + ax=ax, labels=labels, is_vertical=self.orientation == "vertical" + ) + + def _make_legend(self) -> None: + pass + + def _post_plot_logic(self, ax: Axes, data) -> None: + # GH 45465: make sure that the boxplot doesn't ignore xlabel/ylabel + if self.xlabel: + ax.set_xlabel(pprint_thing(self.xlabel)) + if self.ylabel: + ax.set_ylabel(pprint_thing(self.ylabel)) + + @property + def orientation(self) -> Literal["horizontal", "vertical"]: + if self.kwds.get("vert", True): + return "vertical" + else: + return "horizontal" + + @property + def result(self): + if self.return_type is None: + return super().result + else: + return self._return_obj + + +def maybe_color_bp(bp, color_tup, **kwds) -> None: + # GH#30346, when users 
specifying those arguments explicitly, our defaults + # for these four kwargs should be overridden; if not, use Pandas settings + if not kwds.get("boxprops"): + setp(bp["boxes"], color=color_tup[0], alpha=1) + if not kwds.get("whiskerprops"): + setp(bp["whiskers"], color=color_tup[1], alpha=1) + if not kwds.get("medianprops"): + setp(bp["medians"], color=color_tup[2], alpha=1) + if not kwds.get("capprops"): + setp(bp["caps"], color=color_tup[3], alpha=1) + + +def _grouped_plot_by_column( + plotf, + data, + columns=None, + by=None, + numeric_only: bool = True, + grid: bool = False, + figsize: tuple[float, float] | None = None, + ax=None, + layout=None, + return_type=None, + **kwargs, +): + grouped = data.groupby(by, observed=False) + if columns is None: + if not isinstance(by, (list, tuple)): + by = [by] + columns = data._get_numeric_data().columns.difference(by) + naxes = len(columns) + fig, axes = create_subplots( + naxes=naxes, + sharex=kwargs.pop("sharex", True), + sharey=kwargs.pop("sharey", True), + figsize=figsize, + ax=ax, + layout=layout, + ) + + _axes = flatten_axes(axes) + + # GH 45465: move the "by" label based on "vert" + xlabel, ylabel = kwargs.pop("xlabel", None), kwargs.pop("ylabel", None) + if kwargs.get("vert", True): + xlabel = xlabel or by + else: + ylabel = ylabel or by + + ax_values = [] + + for i, col in enumerate(columns): + ax = _axes[i] + gp_col = grouped[col] + keys, values = zip(*gp_col) + re_plotf = plotf(keys, values, ax, xlabel=xlabel, ylabel=ylabel, **kwargs) + ax.set_title(col) + ax_values.append(re_plotf) + ax.grid(grid) + + result = pd.Series(ax_values, index=columns, copy=False) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type is None: + result = axes + + byline = by[0] if len(by) == 1 else by + fig.suptitle(f"Boxplot grouped by {byline}") + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + + return result + + +def boxplot( + data, + column=None, + by=None, + ax=None, 
+ fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + # validate return_type: + if return_type not in BoxPlot._valid_return_types: + raise ValueError("return_type must be {'axes', 'dict', 'both'}") + + if isinstance(data, ABCSeries): + data = data.to_frame("x") + column = "x" + + def _get_colors(): + # num_colors=3 is required as method maybe_color_bp takes the colors + # in positions 0 and 2. + # if colors not provided, use same defaults as DataFrame.plot.box + result = get_standard_colors(num_colors=3) + result = np.take(result, [0, 0, 2]) + result = np.append(result, "k") + + colors = kwds.pop("color", None) + if colors: + if is_dict_like(colors): + # replace colors in result array with user-specified colors + # taken from the colors dict parameter + # "boxes" value placed in position 0, "whiskers" in 1, etc. + valid_keys = ["boxes", "whiskers", "medians", "caps"] + key_to_index = dict(zip(valid_keys, range(4))) + for key, value in colors.items(): + if key in valid_keys: + result[key_to_index[key]] = value + else: + raise ValueError( + f"color dict contains invalid key '{key}'. 
" + f"The key must be either {valid_keys}" + ) + else: + result.fill(colors) + + return result + + def plot_group(keys, values, ax: Axes, **kwds): + # GH 45465: xlabel/ylabel need to be popped out before plotting happens + xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None) + if xlabel: + ax.set_xlabel(pprint_thing(xlabel)) + if ylabel: + ax.set_ylabel(pprint_thing(ylabel)) + + keys = [pprint_thing(x) for x in keys] + values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] + bp = ax.boxplot(values, **kwds) + if fontsize is not None: + ax.tick_params(axis="both", labelsize=fontsize) + + # GH 45465: x/y are flipped when "vert" changes + _set_ticklabels( + ax=ax, labels=keys, is_vertical=kwds.get("vert", True), rotation=rot + ) + maybe_color_bp(bp, color_tup=colors, **kwds) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type == "dict": + return bp + elif return_type == "both": + return BoxPlot.BP(ax=ax, lines=bp) + else: + return ax + + colors = _get_colors() + if column is None: + columns = None + elif isinstance(column, (list, tuple)): + columns = column + else: + columns = [column] + + if by is not None: + # Prefer array return type for 2-D plots to match the subplot layout + # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 + result = _grouped_plot_by_column( + plot_group, + data, + columns=columns, + by=by, + grid=grid, + figsize=figsize, + ax=ax, + layout=layout, + return_type=return_type, + **kwds, + ) + else: + if return_type is None: + return_type = "axes" + if layout is not None: + raise ValueError("The 'layout' keyword is not supported when 'by' is None") + + if ax is None: + rc = {"figure.figsize": figsize} if figsize is not None else {} + with plt.rc_context(rc): + ax = plt.gca() + data = data._get_numeric_data() + naxes = len(data.columns) + if naxes == 0: + raise ValueError( + "boxplot method requires numerical columns, nothing to plot." 
+ ) + if columns is None: + columns = data.columns + else: + data = data[columns] + + result = plot_group(columns, data.values.T, ax, **kwds) + ax.grid(grid) + + return result + + +def boxplot_frame( + self, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + ax = boxplot( + self, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + grid=grid, + rot=rot, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwds, + ) + plt.draw_if_interactive() + return ax + + +def boxplot_frame_groupby( + grouped, + subplots: bool = True, + column=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + ax=None, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = True, + **kwds, +): + if subplots is True: + naxes = len(grouped) + fig, axes = create_subplots( + naxes=naxes, + squeeze=False, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + ) + axes = flatten_axes(axes) + + ret = pd.Series(dtype=object) + + for (key, group), ax in zip(grouped, axes): + d = group.boxplot( + ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds + ) + ax.set_title(pprint_thing(key)) + ret.loc[key] = d + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + else: + keys, frames = zip(*grouped) + if grouped.axis == 0: + df = pd.concat(frames, keys=keys, axis=1) + elif len(frames) > 1: + df = frames[0].join(frames[1::]) + else: + df = frames[0] + + # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument + # is assigned, and in this case, since `df` here becomes MI after groupby, + # so we need to couple the keys (grouped values) and column (original df + # column) together to search for subset to plot + if column is not None: + column = 
com.convert_to_list_like(column) + multi_key = pd.MultiIndex.from_product([keys, column]) + column = list(multi_key.values) + ret = df.boxplot( + column=column, + fontsize=fontsize, + rot=rot, + grid=grid, + ax=ax, + figsize=figsize, + layout=layout, + **kwds, + ) + return ret diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..9acb93ce69a9ca25962139891e6bb1e5e163add8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py @@ -0,0 +1,1139 @@ +from __future__ import annotations + +import contextlib +import datetime as pydt +from datetime import ( + datetime, + timedelta, + tzinfo, +) +import functools +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import matplotlib.dates as mdates +from matplotlib.ticker import ( + AutoLocator, + Formatter, + Locator, +) +from matplotlib.transforms import nonsingular +import matplotlib.units as munits +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Timestamp, + to_offset, +) +from pandas._libs.tslibs.dtypes import ( + FreqGroup, + periods_per_day, +) +from pandas._typing import ( + F, + npt, +) + +from pandas.core.dtypes.common import ( + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_nested_list_like, +) + +from pandas import ( + Index, + Series, + get_option, +) +import pandas.core.common as com +from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period import ( + Period, + PeriodIndex, + period_range, +) +import pandas.core.tools.datetimes as tools + +if TYPE_CHECKING: + from collections.abc import Generator + + from matplotlib.axis import Axis + + from pandas._libs.tslibs.offsets import BaseOffset + + +_mpl_units = {} # Cache for units overwritten by us + + +def get_pairs(): + pairs = [ + 
(Timestamp, DatetimeConverter), + (Period, PeriodConverter), + (pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + +def register_pandas_matplotlib_converters(func: F) -> F: + """ + Decorator applying pandas_converters. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with pandas_converters(): + return func(*args, **kwargs) + + return cast(F, wrapper) + + +@contextlib.contextmanager +def pandas_converters() -> Generator[None, None, None]: + """ + Context manager registering pandas' converters for a plot. + + See Also + -------- + register_pandas_matplotlib_converters : Decorator that applies this. + """ + value = get_option("plotting.matplotlib.register_converters") + + if value: + # register for True or "auto" + register() + try: + yield + finally: + if value == "auto": + # only deregister for "auto" + deregister() + + +def register() -> None: + pairs = get_pairs() + for type_, cls in pairs: + # Cache previous converter if present + if type_ in munits.registry and not isinstance(munits.registry[type_], cls): + previous = munits.registry[type_] + _mpl_units[type_] = previous + # Replace with pandas converter + munits.registry[type_] = cls() + + +def deregister() -> None: + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(munits.registry.get(type_)) is cls: + munits.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: + # make it idempotent by excluding ours. 
+ munits.registry[unit] = formatter + + +def _to_ordinalf(tm: pydt.time) -> float: + tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6 + return tot_sec + + +def time2num(d): + if isinstance(d, str): + parsed = Timestamp(d) + return _to_ordinalf(parsed.time()) + if isinstance(d, pydt.time): + return _to_ordinalf(d) + return d + + +class TimeConverter(munits.ConversionInterface): + @staticmethod + def convert(value, unit, axis): + valid_types = (str, pydt.time) + if isinstance(value, valid_types) or is_integer(value) or is_float(value): + return time2num(value) + if isinstance(value, Index): + return value.map(time2num) + if isinstance(value, (list, tuple, np.ndarray, Index)): + return [time2num(x) for x in value] + return value + + @staticmethod + def axisinfo(unit, axis) -> munits.AxisInfo | None: + if unit != "time": + return None + + majloc = AutoLocator() + majfmt = TimeFormatter(majloc) + return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label="time") + + @staticmethod + def default_units(x, axis) -> str: + return "time" + + +# time formatter +class TimeFormatter(Formatter): + def __init__(self, locs) -> None: + self.locs = locs + + def __call__(self, x, pos: int | None = 0) -> str: + """ + Return the time of day as a formatted string. + + Parameters + ---------- + x : float + The time of day specified as seconds since 00:00 (midnight), + with up to microsecond precision. + pos + Unused + + Returns + ------- + str + A string in HH:MM:SS.mmmuuu format. Microseconds, + milliseconds and seconds are only displayed if non-zero. 
+ """ + fmt = "%H:%M:%S.%f" + s = int(x) + msus = round((x - s) * 10**6) + ms = msus // 1000 + us = msus % 1000 + m, s = divmod(s, 60) + h, m = divmod(m, 60) + _, h = divmod(h, 24) + if us != 0: + return pydt.time(h, m, s, msus).strftime(fmt) + elif ms != 0: + return pydt.time(h, m, s, msus).strftime(fmt)[:-3] + elif s != 0: + return pydt.time(h, m, s).strftime("%H:%M:%S") + + return pydt.time(h, m).strftime("%H:%M") + + +# Period Conversion + + +class PeriodConverter(mdates.DateConverter): + @staticmethod + def convert(values, units, axis): + if is_nested_list_like(values): + values = [PeriodConverter._convert_1d(v, units, axis) for v in values] + else: + values = PeriodConverter._convert_1d(values, units, axis) + return values + + @staticmethod + def _convert_1d(values, units, axis): + if not hasattr(axis, "freq"): + raise TypeError("Axis must have `freq` set to convert to Periods") + valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + if ( + isinstance(values, valid_types) + or is_integer(values) + or is_float(values) + ): + return get_datevalue(values, axis.freq) + elif isinstance(values, PeriodIndex): + return values.asfreq(axis.freq).asi8 + elif isinstance(values, Index): + return values.map(lambda x: get_datevalue(x, axis.freq)) + elif lib.infer_dtype(values, skipna=False) == "period": + # https://github.com/pandas-dev/pandas/issues/24304 + # convert ndarray[period] -> PeriodIndex + return PeriodIndex(values, freq=axis.freq).asi8 + elif isinstance(values, (list, tuple, np.ndarray, Index)): + return [get_datevalue(x, axis.freq) for x in values] + return values + + +def get_datevalue(date, freq): + if isinstance(date, Period): + return date.asfreq(freq).ordinal + elif isinstance(date, (str, datetime, 
pydt.date, pydt.time, np.datetime64)): + return Period(date, freq).ordinal + elif ( + is_integer(date) + or is_float(date) + or (isinstance(date, (np.ndarray, Index)) and (date.size == 1)) + ): + return date + elif date is None: + return None + raise ValueError(f"Unrecognizable date '{date}'") + + +# Datetime Conversion +class DatetimeConverter(mdates.DateConverter): + @staticmethod + def convert(values, unit, axis): + # values might be a 1-d array, or a list-like of arrays. + if is_nested_list_like(values): + values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] + else: + values = DatetimeConverter._convert_1d(values, unit, axis) + return values + + @staticmethod + def _convert_1d(values, unit, axis): + def try_parse(values): + try: + return mdates.date2num(tools.to_datetime(values)) + except Exception: + return values + + if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): + return mdates.date2num(values) + elif is_integer(values) or is_float(values): + return values + elif isinstance(values, str): + return try_parse(values) + elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): + if isinstance(values, Series): + # https://github.com/matplotlib/matplotlib/issues/11391 + # Series was skipped. Convert to DatetimeIndex to get asi8 + values = Index(values) + if isinstance(values, Index): + values = values.values + if not isinstance(values, np.ndarray): + values = com.asarray_tuplesafe(values) + + if is_integer_dtype(values) or is_float_dtype(values): + return values + + try: + values = tools.to_datetime(values) + except Exception: + pass + + values = mdates.date2num(values) + + return values + + @staticmethod + def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo: + """ + Return the :class:`~matplotlib.units.AxisInfo` for *unit*. + + *unit* is a tzinfo instance or None. + The *axis* argument is required but not used. 
+ """ + tz = unit + + majloc = PandasAutoDateLocator(tz=tz) + majfmt = PandasAutoDateFormatter(majloc, tz=tz) + datemin = pydt.date(2000, 1, 1) + datemax = pydt.date(2010, 1, 1) + + return munits.AxisInfo( + majloc=majloc, majfmt=majfmt, label="", default_limits=(datemin, datemax) + ) + + +class PandasAutoDateFormatter(mdates.AutoDateFormatter): + def __init__(self, locator, tz=None, defaultfmt: str = "%Y-%m-%d") -> None: + mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) + + +class PandasAutoDateLocator(mdates.AutoDateLocator): + def get_locator(self, dmin, dmax): + """Pick the best locator based on a distance.""" + tot_sec = (dmax - dmin).total_seconds() + + if abs(tot_sec) < self.minticks: + self._freq = -1 + locator = MilliSecondLocator(self.tz) + locator.set_axis(self.axis) + + # error: Item "None" of "Axis | _DummyAxis | _AxisWrapper | None" + # has no attribute "get_data_interval" + locator.axis.set_view_interval( # type: ignore[union-attr] + *self.axis.get_view_interval() # type: ignore[union-attr] + ) + locator.axis.set_data_interval( # type: ignore[union-attr] + *self.axis.get_data_interval() # type: ignore[union-attr] + ) + return locator + + return mdates.AutoDateLocator.get_locator(self, dmin, dmax) + + def _get_unit(self): + return MilliSecondLocator.get_unit_generic(self._freq) + + +class MilliSecondLocator(mdates.DateLocator): + UNIT = 1.0 / (24 * 3600 * 1000) + + def __init__(self, tz) -> None: + mdates.DateLocator.__init__(self, tz) + self._interval = 1.0 + + def _get_unit(self): + return self.get_unit_generic(-1) + + @staticmethod + def get_unit_generic(freq): + unit = mdates.RRuleLocator.get_unit_generic(freq) + if unit < 0: + return MilliSecondLocator.UNIT + return unit + + def __call__(self): + # if no data have been set, this will tank with a ValueError + try: + dmin, dmax = self.viewlim_to_dt() + except ValueError: + return [] + + # We need to cap at the endpoints of valid datetime + nmax, nmin = mdates.date2num((dmax, dmin)) 
+ + num = (nmax - nmin) * 86400 * 1000 + max_millis_ticks = 6 + for interval in [1, 10, 50, 100, 200, 500]: + if num <= interval * (max_millis_ticks - 1): + self._interval = interval + break + # We went through the whole loop without breaking, default to 1 + self._interval = 1000.0 + + estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) + + if estimate > self.MAXTICKS * 2: + raise RuntimeError( + "MillisecondLocator estimated to generate " + f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS" + f"* 2 ({self.MAXTICKS * 2:d}) " + ) + + interval = self._get_interval() + freq = f"{interval}ms" + tz = self.tz.tzname(None) + st = dmin.replace(tzinfo=None) + ed = dmin.replace(tzinfo=None) + all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) + + try: + if len(all_dates) > 0: + locs = self.raise_if_exceeds(mdates.date2num(all_dates)) + return locs + except Exception: # pragma: no cover + pass + + lims = mdates.date2num([dmin, dmax]) + return lims + + def _get_interval(self): + return self._interval + + def autoscale(self): + """ + Set the view limits to include the data range. 
+ """ + # We need to cap at the endpoints of valid datetime + dmin, dmax = self.datalim_to_dt() + + vmin = mdates.date2num(dmin) + vmax = mdates.date2num(dmax) + + return self.nonsingular(vmin, vmax) + + +def _from_ordinal(x, tz: tzinfo | None = None) -> datetime: + ix = int(x) + dt = datetime.fromordinal(ix) + remainder = float(x) - ix + hour, remainder = divmod(24 * remainder, 1) + minute, remainder = divmod(60 * remainder, 1) + second, remainder = divmod(60 * remainder, 1) + microsecond = int(1_000_000 * remainder) + if microsecond < 10: + microsecond = 0 # compensate for rounding errors + dt = datetime( + dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond + ) + if tz is not None: + dt = dt.astimezone(tz) + + if microsecond > 999990: # compensate for rounding errors + dt += timedelta(microseconds=1_000_000 - microsecond) + + return dt + + +# Fixed frequency dynamic tick locators and formatters + +# ------------------------------------------------------------------------- +# --- Locators --- +# ------------------------------------------------------------------------- + + +def _get_default_annual_spacing(nyears) -> tuple[int, int]: + """ + Returns a default spacing between consecutive ticks for annual data. + """ + if nyears < 11: + (min_spacing, maj_spacing) = (1, 1) + elif nyears < 20: + (min_spacing, maj_spacing) = (1, 2) + elif nyears < 50: + (min_spacing, maj_spacing) = (1, 5) + elif nyears < 100: + (min_spacing, maj_spacing) = (5, 10) + elif nyears < 200: + (min_spacing, maj_spacing) = (5, 25) + elif nyears < 600: + (min_spacing, maj_spacing) = (10, 50) + else: + factor = nyears // 1000 + 1 + (min_spacing, maj_spacing) = (factor * 20, factor * 100) + return (min_spacing, maj_spacing) + + +def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]: + """ + Returns the indices where the given period changes. + + Parameters + ---------- + dates : PeriodIndex + Array of intervals to monitor. 
+ period : str + Name of the period to monitor. + """ + mask = _period_break_mask(dates, period) + return np.nonzero(mask)[0] + + +def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]: + current = getattr(dates, period) + previous = getattr(dates - 1 * dates.freq, period) + return current != previous + + +def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool: + """ + Returns true if the ``label_flags`` indicate there is at least one label + for this level. + + if the minimum view limit is not an exact integer, then the first tick + label won't be shown, so we must adjust for that. + """ + if label_flags.size == 0 or ( + label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0 + ): + return False + else: + return True + + +def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]: + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + freq_group = FreqGroup.from_period_dtype_code(dtype_code) + + ppd = -1 # placeholder for above-day freqs + + if dtype_code >= FreqGroup.FR_HR.value: + # error: "BaseOffset" has no attribute "_creso" + ppd = periods_per_day(freq._creso) # type: ignore[attr-defined] + ppm = 28 * ppd + ppy = 365 * ppd + elif freq_group == FreqGroup.FR_BUS: + ppm = 19 + ppy = 261 + elif freq_group == FreqGroup.FR_DAY: + ppm = 28 + ppy = 365 + elif freq_group == FreqGroup.FR_WK: + ppm = 3 + ppy = 52 + elif freq_group == FreqGroup.FR_MTH: + ppm = 1 + ppy = 12 + elif freq_group == FreqGroup.FR_QTR: + ppm = -1 # placerholder + ppy = 4 + elif freq_group == FreqGroup.FR_ANN: + ppm = -1 # placeholder + ppy = 1 + else: + raise NotImplementedError(f"Unsupported frequency: {dtype_code}") + + return ppd, ppm, ppy + + +@functools.cache +def _daily_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray: + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: 
ignore[attr-defined] + + periodsperday, periodspermonth, periodsperyear = _get_periods_per_ymd(freq) + + # save this for later usage + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + dates_ = period_range( + start=Period(ordinal=vmin, freq=freq), + end=Period(ordinal=vmax, freq=freq), + freq=freq, + ) + + # Initialize the output + info = np.zeros( + span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] + ) + info["val"][:] = dates_.asi8 + info["fmt"][:] = "" + info["maj"][[0, -1]] = True + # .. and set some shortcuts + info_maj = info["maj"] + info_min = info["min"] + info_fmt = info["fmt"] + + def first_label(label_flags): + if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0): + return label_flags[1] + else: + return label_flags[0] + + # Case 1. 
Less than a month + if span <= periodspermonth: + day_start = _period_break(dates_, "day") + month_start = _period_break(dates_, "month") + year_start = _period_break(dates_, "year") + + def _hour_finder(label_interval: int, force_year_start: bool) -> None: + target = dates_.hour + mask = _period_break_mask(dates_, "hour") + info_maj[day_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + if force_year_start and not has_level_label(year_start, vmin_orig): + info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y" + + def _minute_finder(label_interval: int) -> None: + target = dates_.minute + hour_start = _period_break(dates_, "hour") + mask = _period_break_mask(dates_, "minute") + info_maj[hour_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + + def _second_finder(label_interval: int) -> None: + target = dates_.second + minute_start = _period_break(dates_, "minute") + mask = _period_break_mask(dates_, "second") + info_maj[minute_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M:%S" + info_fmt[day_start] = "%H:%M:%S\n%d-%b" + info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y" + + if span < periodsperday / 12000: + _second_finder(1) + elif span < periodsperday / 6000: + _second_finder(2) + elif span < periodsperday / 2400: + _second_finder(5) + elif span < periodsperday / 1200: + _second_finder(10) + elif span < periodsperday / 800: + _second_finder(15) + elif span < periodsperday / 400: + _second_finder(30) + elif span < periodsperday / 150: + _minute_finder(1) + elif span < periodsperday / 70: + _minute_finder(2) + elif span < periodsperday / 24: + _minute_finder(5) + 
        elif span < periodsperday / 12:
            _minute_finder(15)
        elif span < periodsperday / 6:
            _minute_finder(30)
        elif span < periodsperday / 2.5:
            _hour_finder(1, False)
        elif span < periodsperday / 1.5:
            _hour_finder(2, False)
        elif span < periodsperday * 1.25:
            _hour_finder(3, False)
        elif span < periodsperday * 2.5:
            _hour_finder(6, True)
        elif span < periodsperday * 4:
            _hour_finder(12, True)
        else:
            # Several days: major ticks on months, minor ticks on days.
            info_maj[month_start] = True
            info_min[day_start] = True
            info_fmt[day_start] = "%d"
            info_fmt[month_start] = "%d\n%b"
            info_fmt[year_start] = "%d\n%b\n%Y"
            if not has_level_label(year_start, vmin_orig):
                if not has_level_label(month_start, vmin_orig):
                    info_fmt[first_label(day_start)] = "%d\n%b\n%Y"
                else:
                    info_fmt[first_label(month_start)] = "%d\n%b\n%Y"

    # Case 2. Less than three months
    elif span <= periodsperyear // 4:
        month_start = _period_break(dates_, "month")
        info_maj[month_start] = True
        if dtype_code < FreqGroup.FR_HR.value:
            info["min"] = True
        else:
            day_start = _period_break(dates_, "day")
            info["min"][day_start] = True
        week_start = _period_break(dates_, "week")
        year_start = _period_break(dates_, "year")
        info_fmt[week_start] = "%d"
        info_fmt[month_start] = "\n\n%b"
        info_fmt[year_start] = "\n\n%b\n%Y"
        if not has_level_label(year_start, vmin_orig):
            if not has_level_label(month_start, vmin_orig):
                info_fmt[first_label(week_start)] = "\n\n%b\n%Y"
            else:
                info_fmt[first_label(month_start)] = "\n\n%b\n%Y"
    # Case 3. Less than 14 months ...............
    elif span <= 1.15 * periodsperyear:
        year_start = _period_break(dates_, "year")
        month_start = _period_break(dates_, "month")
        week_start = _period_break(dates_, "week")
        info_maj[month_start] = True
        info_min[week_start] = True
        # A tick cannot be both major and minor; majors win at year/month starts.
        info_min[year_start] = False
        info_min[month_start] = False
        info_fmt[month_start] = "%b"
        info_fmt[year_start] = "%b\n%Y"
        if not has_level_label(year_start, vmin_orig):
            info_fmt[first_label(month_start)] = "%b\n%Y"
    # Case 4. Less than 2.5 years ...............
    elif span <= 2.5 * periodsperyear:
        year_start = _period_break(dates_, "year")
        quarter_start = _period_break(dates_, "quarter")
        month_start = _period_break(dates_, "month")
        info_maj[quarter_start] = True
        info_min[month_start] = True
        info_fmt[quarter_start] = "%b"
        info_fmt[year_start] = "%b\n%Y"
    # Case 4. Less than 4 years .................
    elif span <= 4 * periodsperyear:
        year_start = _period_break(dates_, "year")
        month_start = _period_break(dates_, "month")
        info_maj[year_start] = True
        info_min[month_start] = True
        info_min[year_start] = False

        # Label only January and July to keep the axis uncluttered.
        month_break = dates_[month_start].month
        jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
        info_fmt[jan_or_jul] = "%b"
        info_fmt[year_start] = "%b\n%Y"
    # Case 5. Less than 11 years ................
    elif span <= 11 * periodsperyear:
        year_start = _period_break(dates_, "year")
        quarter_start = _period_break(dates_, "quarter")
        info_maj[year_start] = True
        info_min[quarter_start] = True
        info_min[year_start] = False
        info_fmt[year_start] = "%Y"
    # Case 6. More than 12 years ................
    else:
        year_start = _period_break(dates_, "year")
        year_break = dates_[year_start].year
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        # Major/minor ticks only on years divisible by the chosen spacings.
        major_idx = year_start[(year_break % maj_anndef == 0)]
        info_maj[major_idx] = True
        minor_idx = year_start[(year_break % min_anndef == 0)]
        info_min[minor_idx] = True
        info_fmt[major_idx] = "%Y"

    return info


@functools.cache
def _monthly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
    """Tick/label layout for monthly-frequency data (val/maj/min/fmt array)."""
    _, _, periodsperyear = _get_periods_per_ymd(freq)

    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1

    # Initialize the output
    info = np.zeros(
        span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
    )
    info["val"] = np.arange(vmin, vmax + 1)
    dates_ = info["val"]
    info["fmt"] = ""
    # Monthly ordinals: ordinal % 12 == 0 marks a January (year start).
    year_start = (dates_ % 12 == 0).nonzero()[0]
    info_maj = info["maj"]
    info_fmt = info["fmt"]

    if span <= 1.15 * periodsperyear:
        info_maj[year_start] = True
        info["min"] = True

        info_fmt[:] = "%b"
        info_fmt[year_start] = "%b\n%Y"

        if not has_level_label(year_start, vmin_orig):
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = "%b\n%Y"

    elif span <= 2.5 * periodsperyear:
        quarter_start = (dates_ % 3 == 0).nonzero()
        info_maj[year_start] = True
        # TODO: Check the following : is it really info['fmt'] ?
        # 2023-09-15 this is reached in test_finder_monthly
        # NOTE(review): writing True into the "fmt" bytes field looks like it
        # was meant to be info["maj"] — left unchanged pending confirmation.
        info["fmt"][quarter_start] = True
        info["min"] = True

        info_fmt[quarter_start] = "%b"
        info_fmt[year_start] = "%b\n%Y"

    elif span <= 4 * periodsperyear:
        info_maj[year_start] = True
        info["min"] = True

        # Label only January (ordinal % 12 == 0) and July (== 6).
        jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
        info_fmt[jan_or_jul] = "%b"
        info_fmt[year_start] = "%b\n%Y"

    elif span <= 11 * periodsperyear:
        quarter_start = (dates_ % 3 == 0).nonzero()
        info_maj[year_start] = True
        info["min"][quarter_start] = True

        info_fmt[year_start] = "%Y"

    else:
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        years = dates_[year_start] // 12 + 1
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info["min"][year_start[(years % min_anndef == 0)]] = True

        info_fmt[major_idx] = "%Y"

    return info


@functools.cache
def _quarterly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
    """Tick/label layout for quarterly-frequency data (val/maj/min/fmt array)."""
    _, _, periodsperyear = _get_periods_per_ymd(freq)
    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1

    info = np.zeros(
        span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
    )
    info["val"] = np.arange(vmin, vmax + 1)
    info["fmt"] = ""
    dates_ = info["val"]
    info_maj = info["maj"]
    info_fmt = info["fmt"]
    # Quarterly ordinals: ordinal % 4 == 0 marks a Q1 (year start).
    year_start = (dates_ % 4 == 0).nonzero()[0]

    if span <= 3.5 * periodsperyear:
        info_maj[year_start] = True
        info["min"] = True

        info_fmt[:] = "Q%q"
        info_fmt[year_start] = "Q%q\n%F"
        if not has_level_label(year_start, vmin_orig):
            if dates_.size > 1:
                idx = 1
            else:
                idx = 0
            info_fmt[idx] = "Q%q\n%F"

    elif span <= 11 * periodsperyear:
        info_maj[year_start] = True
        info["min"] = True
        info_fmt[year_start] = "%F"

    else:
        # https://github.com/pandas-dev/pandas/pull/47602
        years = dates_[year_start] // 4 + 1970
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(years % maj_anndef == 0)]
        info_maj[major_idx] = True
        info["min"][year_start[(years % min_anndef == 0)]] = True
        info_fmt[major_idx] = "%F"

    return info


@functools.cache
def _annual_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
    """Tick/label layout for annual-frequency data (val/maj/min/fmt array)."""
    # Note: small difference here vs other finders in adding 1 to vmax
    (vmin, vmax) = (int(vmin), int(vmax + 1))
    span = vmax - vmin + 1

    info = np.zeros(
        span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
    )
    info["val"] = np.arange(vmin, vmax + 1)
    info["fmt"] = ""
    dates_ = info["val"]

    (min_anndef, maj_anndef) = _get_default_annual_spacing(span)
    major_idx = dates_ % maj_anndef == 0
    minor_idx = dates_ % min_anndef == 0
    info["maj"][major_idx] = True
    info["min"][minor_idx] = True
    info["fmt"][major_idx] = "%Y"

    return info


def get_finder(freq: BaseOffset):
    """Return the finder function matching *freq*'s frequency group."""
    # error: "BaseOffset" has no attribute "_period_dtype_code"
    dtype_code = freq._period_dtype_code  # type: ignore[attr-defined]
    fgroup = FreqGroup.from_period_dtype_code(dtype_code)

    if fgroup == FreqGroup.FR_ANN:
        return _annual_finder
    elif fgroup == FreqGroup.FR_QTR:
        return _quarterly_finder
    elif fgroup == FreqGroup.FR_MTH:
        return _monthly_finder
    elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK:
        return _daily_finder
    else:  # pragma: no cover
        raise NotImplementedError(f"Unsupported frequency: {dtype_code}")


class TimeSeries_DateLocator(Locator):
    """
    Locates the ticks along an axis controlled by a :class:`Series`.

    Parameters
    ----------
    freq : BaseOffset
        Valid frequency specifier.
    minor_locator : {False, True}, optional
        Whether the locator is for minor ticks (True) or not.
    dynamic_mode : {True, False}, optional
        Whether the locator should work in dynamic mode.
    base : {int}, optional
    quarter : {int}, optional
    month : {int}, optional
    day : {int}, optional
    """

    axis: Axis

    def __init__(
        self,
        freq: BaseOffset,
        minor_locator: bool = False,
        dynamic_mode: bool = True,
        base: int = 1,
        quarter: int = 1,
        month: int = 1,
        day: int = 1,
        plot_obj=None,
    ) -> None:
        freq = to_offset(freq, is_period=True)
        self.freq = freq
        self.base = base
        (self.quarter, self.month, self.day) = (quarter, month, day)
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        # Frequency-appropriate finder (daily/monthly/quarterly/annual).
        self.finder = get_finder(freq)

    def _get_default_locs(self, vmin, vmax):
        """Returns the default locations of ticks."""
        locator = self.finder(vmin, vmax, self.freq)

        # Select minor or major tick positions from the finder's layout array.
        if self.isminor:
            return np.compress(locator["min"], locator["val"])
        return np.compress(locator["maj"], locator["val"])

    def __call__(self):
        """Return the locations of the ticks."""
        # matplotlib's Axis calls Locator.set_axis before invoking us,
        # so self.axis is available here.

        vi = tuple(self.axis.get_view_interval())
        vmin, vmax = vi
        if vmax < vmin:
            vmin, vmax = vmax, vmin
        if self.isdynamic:
            locs = self._get_default_locs(vmin, vmax)
        else:  # pragma: no cover
            # Static mode: evenly spaced ticks at multiples of `base`.
            base = self.base
            (d, m) = divmod(vmin, base)
            vmin = (d + 1) * base
            # error: No overload variant of "range" matches argument types "float",
            # "float", "int"
            locs = list(range(vmin, vmax + 1, base))  # type: ignore[call-overload]
        return locs

    def autoscale(self):
        """
        Sets the view limits to the nearest multiples of base that contain the
        data.
        """
        # requires matplotlib >= 0.98.0
        (vmin, vmax) = self.axis.get_data_interval()

        locs = self._get_default_locs(vmin, vmax)
        (vmin, vmax) = locs[[0, -1]]
        if vmin == vmax:
            vmin -= 1
            vmax += 1
        return nonsingular(vmin, vmax)


# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------


class TimeSeries_DateFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`PeriodIndex`.

    Parameters
    ----------
    freq : BaseOffset
        Valid frequency specifier.
    minor_locator : bool, default False
        Whether the current formatter should apply to minor ticks (True) or
        major ticks (False).
    dynamic_mode : bool, default True
        Whether the formatter works in dynamic mode or not.
    """

    axis: Axis

    def __init__(
        self,
        freq: BaseOffset,
        minor_locator: bool = False,
        dynamic_mode: bool = True,
        plot_obj=None,
    ) -> None:
        freq = to_offset(freq, is_period=True)
        self.format = None
        self.freq = freq
        self.locs: list[Any] = []  # unused, for matplotlib compat
        # Maps tick ordinal -> strftime format; built lazily in set_locs.
        self.formatdict: dict[Any, Any] | None = None
        self.isminor = minor_locator
        self.isdynamic = dynamic_mode
        self.offset = 0
        self.plot_obj = plot_obj
        self.finder = get_finder(freq)

    def _set_default_format(self, vmin, vmax):
        """Returns the default ticks spacing."""
        info = self.finder(vmin, vmax, self.freq)

        if self.isminor:
            # Minor-only positions: exclude ticks that are also major.
            format = np.compress(info["min"] & np.logical_not(info["maj"]), info)
        else:
            format = np.compress(info["maj"], info)
        self.formatdict = {x: f for (x, _, _, f) in format}
        return self.formatdict

    def set_locs(self, locs) -> None:
        """Sets the locations of the ticks"""
        # don't actually use the locs. This is just needed to work with
        # matplotlib. Force to use vmin, vmax

        self.locs = locs

        (vmin, vmax) = tuple(self.axis.get_view_interval())
        if vmax < vmin:
            (vmin, vmax) = (vmax, vmin)
        self._set_default_format(vmin, vmax)

    def __call__(self, x, pos: int | None = 0) -> str:
        if self.formatdict is None:
            return ""
        else:
            fmt = self.formatdict.pop(x, "")
            if isinstance(fmt, np.bytes_):
                fmt = fmt.decode("utf-8")
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore",
                    "Period with BDay freq is deprecated",
                    category=FutureWarning,
                )
                period = Period(ordinal=int(x), freq=self.freq)
            assert isinstance(period, Period)
            return period.strftime(fmt)


class TimeSeries_TimedeltaFormatter(Formatter):
    """
    Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
    """

    axis: Axis

    @staticmethod
    def format_timedelta_ticks(x, pos, n_decimals: int) -> str:
        """
        Convert seconds to 'D days HH:MM:SS.F'
        """
        s, ns = divmod(x, 10**9)  # TODO(non-nano): this looks like it assumes ns
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        decimals = int(ns * 10 ** (n_decimals - 9))
        s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}"
        if n_decimals > 0:
            s += f".{decimals:0{n_decimals}d}"
        if d != 0:
            s = f"{int(d):d} days {s}"
        return s

    def __call__(self, x, pos: int | None = 0) -> str:
        (vmin, vmax) = tuple(self.axis.get_view_interval())
        # Show just enough fractional digits for the current zoom level.
        n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9)
        return self.format_timedelta_ticks(x, pos, n_decimals)
diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a1e589c2279bdadb736ce85312bc2c84f5793eb
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py
@@ -0,0 +1,2125 @@
from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
from collections.abc import (
    Hashable,
    Iterable,
    Iterator,
    Sequence,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    cast,
    final,
)
import warnings

import matplotlib as mpl
import numpy as np

from pandas._libs import lib
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.common import (
    is_any_real_numeric_dtype,
    is_bool,
    is_float,
    is_float_dtype,
    is_hashable,
    is_integer,
    is_integer_dtype,
    is_iterator,
    is_list_like,
    is_number,
    is_numeric_dtype,
)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    ExtensionDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCDatetimeIndex,
    ABCIndex,
    ABCMultiIndex,
    ABCPeriodIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import isna

import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.util.version import Version

from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib import tools
from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters
from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by
from pandas.plotting._matplotlib.misc import unpack_single_str_list
from pandas.plotting._matplotlib.style import get_standard_colors
from pandas.plotting._matplotlib.timeseries import (
    decorate_axes,
    format_dateaxis,
    maybe_convert_index,
    maybe_resample,
    use_dynamic_x,
)
from pandas.plotting._matplotlib.tools import (
    create_subplots,
    flatten_axes,
    format_date_labels,
    get_all_lines,
    get_xlim,
    handle_shared_axes,
)

if TYPE_CHECKING:
    # Imported for annotations only; avoids matplotlib import at module load.
    from matplotlib.artist import Artist
    from matplotlib.axes import Axes
    from matplotlib.axis import Axis
    from matplotlib.figure import Figure

    from pandas._typing import (
        IndexLabel,
        NDFrameT,
        PlottingOrientation,
        npt,
    )

    from pandas import Series


def _color_in_style(style: str) -> bool:
    """
    Check if there is a color letter in the style string.
    """
    from matplotlib.colors import BASE_COLORS

    # BASE_COLORS keys are single letters ('b', 'g', 'r', ...); any overlap
    # with the style string's characters means a color was specified.
    return not set(BASE_COLORS).isdisjoint(style)


class MPLPlot(ABC):
    """
    Base class for assembling a pandas plot using matplotlib

    Parameters
    ----------
    data :

    """

    @property
    @abstractmethod
    def _kind(self) -> str:
        """Specify kind str. Must be overridden in child class"""
        raise NotImplementedError

    _layout_type = "vertical"
    _default_rot = 0

    @property
    def orientation(self) -> str | None:
        return None

    data: DataFrame

    def __init__(
        self,
        data,
        kind=None,
        by: IndexLabel | None = None,
        subplots: bool | Sequence[Sequence[str]] = False,
        sharex: bool | None = None,
        sharey: bool = False,
        use_index: bool = True,
        figsize: tuple[float, float] | None = None,
        grid=None,
        legend: bool | str = True,
        rot=None,
        ax=None,
        fig=None,
        title=None,
        xlim=None,
        ylim=None,
        xticks=None,
        yticks=None,
        xlabel: Hashable | None = None,
        ylabel: Hashable | None = None,
        fontsize: int | None = None,
        secondary_y: bool | tuple | list | np.ndarray = False,
        colormap=None,
        table: bool = False,
        layout=None,
        include_bool: bool = False,
        column: IndexLabel | None = None,
        *,
        logx: bool | None | Literal["sym"] = False,
        logy: bool | None | Literal["sym"] = False,
        loglog: bool | None | Literal["sym"] = False,
        mark_right: bool = True,
        stacked: bool = False,
        label: Hashable | None = None,
        style=None,
        **kwds,
    ) -> None:
        import matplotlib.pyplot as plt

        # if users assign an empty list or tuple, raise `ValueError`
        # similar to current `df.box` and `df.hist` APIs.
+ if by in ([], ()): + raise ValueError("No group keys passed!") + self.by = com.maybe_make_list(by) + + # Assign the rest of columns into self.columns if by is explicitly defined + # while column is not, only need `columns` in hist/box plot when it's DF + # TODO: Might deprecate `column` argument in future PR (#28373) + if isinstance(data, DataFrame): + if column: + self.columns = com.maybe_make_list(column) + elif self.by is None: + self.columns = [ + col for col in data.columns if is_numeric_dtype(data[col]) + ] + else: + self.columns = [ + col + for col in data.columns + if col not in self.by and is_numeric_dtype(data[col]) + ] + + # For `hist` plot, need to get grouped original data before `self.data` is + # updated later + if self.by is not None and self._kind == "hist": + self._grouped = data.groupby(unpack_single_str_list(self.by)) + + self.kind = kind + + self.subplots = type(self)._validate_subplots_kwarg( + subplots, data, kind=self._kind + ) + + self.sharex = type(self)._validate_sharex(sharex, ax, by) + self.sharey = sharey + self.figsize = figsize + self.layout = layout + + self.xticks = xticks + self.yticks = yticks + self.xlim = xlim + self.ylim = ylim + self.title = title + self.use_index = use_index + self.xlabel = xlabel + self.ylabel = ylabel + + self.fontsize = fontsize + + if rot is not None: + self.rot = rot + # need to know for format_date_labels since it's rotated to 30 by + # default + self._rot_set = True + else: + self._rot_set = False + self.rot = self._default_rot + + if grid is None: + grid = False if secondary_y else plt.rcParams["axes.grid"] + + self.grid = grid + self.legend = legend + self.legend_handles: list[Artist] = [] + self.legend_labels: list[Hashable] = [] + + self.logx = type(self)._validate_log_kwd("logx", logx) + self.logy = type(self)._validate_log_kwd("logy", logy) + self.loglog = type(self)._validate_log_kwd("loglog", loglog) + self.label = label + self.style = style + self.mark_right = mark_right + self.stacked = 
stacked + + # ax may be an Axes object or (if self.subplots) an ndarray of + # Axes objects + self.ax = ax + # TODO: deprecate fig keyword as it is ignored, not passed in tests + # as of 2023-11-05 + + # parse errorbar input if given + xerr = kwds.pop("xerr", None) + yerr = kwds.pop("yerr", None) + nseries = self._get_nseries(data) + xerr, data = type(self)._parse_errorbars("xerr", xerr, data, nseries) + yerr, data = type(self)._parse_errorbars("yerr", yerr, data, nseries) + self.errors = {"xerr": xerr, "yerr": yerr} + self.data = data + + if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndex)): + secondary_y = [secondary_y] + self.secondary_y = secondary_y + + # ugly TypeError if user passes matplotlib's `cmap` name. + # Probably better to accept either. + if "cmap" in kwds and colormap: + raise TypeError("Only specify one of `cmap` and `colormap`.") + if "cmap" in kwds: + self.colormap = kwds.pop("cmap") + else: + self.colormap = colormap + + self.table = table + self.include_bool = include_bool + + self.kwds = kwds + + color = kwds.pop("color", lib.no_default) + self.color = self._validate_color_args(color, self.colormap) + assert "color" not in self.kwds + + self.data = self._ensure_frame(self.data) + + @final + @staticmethod + def _validate_sharex(sharex: bool | None, ax, by) -> bool: + if sharex is None: + # if by is defined, subplots are used and sharex should be False + if ax is None and by is None: # pylint: disable=simplifiable-if-statement + sharex = True + else: + # if we get an axis, the users should do the visibility + # setting... 
+ sharex = False + elif not is_bool(sharex): + raise TypeError("sharex must be a bool or None") + return bool(sharex) + + @classmethod + def _validate_log_kwd( + cls, + kwd: str, + value: bool | None | Literal["sym"], + ) -> bool | None | Literal["sym"]: + if ( + value is None + or isinstance(value, bool) + or (isinstance(value, str) and value == "sym") + ): + return value + raise ValueError( + f"keyword '{kwd}' should be bool, None, or 'sym', not '{value}'" + ) + + @final + @staticmethod + def _validate_subplots_kwarg( + subplots: bool | Sequence[Sequence[str]], data: Series | DataFrame, kind: str + ) -> bool | list[tuple[int, ...]]: + """ + Validate the subplots parameter + + - check type and content + - check for duplicate columns + - check for invalid column names + - convert column names into indices + - add missing columns in a group of their own + See comments in code below for more details. + + Parameters + ---------- + subplots : subplots parameters as passed to PlotAccessor + + Returns + ------- + validated subplots : a bool or a list of tuples of column indices. Columns + in the same tuple will be grouped together in the resulting plot. + """ + + if isinstance(subplots, bool): + return subplots + elif not isinstance(subplots, Iterable): + raise ValueError("subplots should be a bool or an iterable") + + supported_kinds = ( + "line", + "bar", + "barh", + "hist", + "kde", + "density", + "area", + "pie", + ) + if kind not in supported_kinds: + raise ValueError( + "When subplots is an iterable, kind must be " + f"one of {', '.join(supported_kinds)}. Got {kind}." + ) + + if isinstance(data, ABCSeries): + raise NotImplementedError( + "An iterable subplots for a Series is not supported." + ) + + columns = data.columns + if isinstance(columns, ABCMultiIndex): + raise NotImplementedError( + "An iterable subplots for a DataFrame with a MultiIndex column " + "is not supported." 
+ ) + + if columns.nunique() != len(columns): + raise NotImplementedError( + "An iterable subplots for a DataFrame with non-unique column " + "labels is not supported." + ) + + # subplots is a list of tuples where each tuple is a group of + # columns to be grouped together (one ax per group). + # we consolidate the subplots list such that: + # - the tuples contain indices instead of column names + # - the columns that aren't yet in the list are added in a group + # of their own. + # For example with columns from a to g, and + # subplots = [(a, c), (b, f, e)], + # we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)] + # This way, we can handle self.subplots in a homogeneous manner + # later. + # TODO: also accept indices instead of just names? + + out = [] + seen_columns: set[Hashable] = set() + for group in subplots: + if not is_list_like(group): + raise ValueError( + "When subplots is an iterable, each entry " + "should be a list/tuple of column names." + ) + idx_locs = columns.get_indexer_for(group) + if (idx_locs == -1).any(): + bad_labels = np.extract(idx_locs == -1, group) + raise ValueError( + f"Column label(s) {list(bad_labels)} not found in the DataFrame." + ) + unique_columns = set(group) + duplicates = seen_columns.intersection(unique_columns) + if duplicates: + raise ValueError( + "Each column should be in only one subplot. " + f"Columns {duplicates} were found in multiple subplots." + ) + seen_columns = seen_columns.union(unique_columns) + out.append(tuple(idx_locs)) + + unseen_columns = columns.difference(seen_columns) + for column in unseen_columns: + idx_loc = columns.get_loc(column) + out.append((idx_loc,)) + return out + + def _validate_color_args(self, color, colormap): + if color is lib.no_default: + # It was not provided by the user + if "colors" in self.kwds and colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. 
" + "Using 'color'", + stacklevel=find_stack_level(), + ) + return None + if self.nseries == 1 and color is not None and not is_list_like(color): + # support series.plot(color='green') + color = [color] + + if isinstance(color, tuple) and self.nseries == 1 and len(color) in (3, 4): + # support RGB and RGBA tuples in series plot + color = [color] + + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + + if self.style is not None: + if is_list_like(self.style): + styles = self.style + else: + styles = [self.style] + # need only a single match + for s in styles: + if _color_in_style(s): + raise ValueError( + "Cannot pass 'style' string with a color symbol and " + "'color' keyword argument. Please use one or the " + "other or pass 'style' without a color symbol" + ) + return color + + @final + @staticmethod + def _iter_data( + data: DataFrame | dict[Hashable, Series | DataFrame] + ) -> Iterator[tuple[Hashable, np.ndarray]]: + for col, values in data.items(): + # This was originally written to use values.values before EAs + # were implemented; adding np.asarray(...) to keep consistent + # typing. 
+ yield col, np.asarray(values.values) + + def _get_nseries(self, data: Series | DataFrame) -> int: + # When `by` is explicitly assigned, grouped data size will be defined, and + # this will determine number of subplots to have, aka `self.nseries` + if data.ndim == 1: + return 1 + elif self.by is not None and self._kind == "hist": + return len(self._grouped) + elif self.by is not None and self._kind == "box": + return len(self.columns) + else: + return data.shape[1] + + @final + @property + def nseries(self) -> int: + return self._get_nseries(self.data) + + @final + def draw(self) -> None: + self.plt.draw_if_interactive() + + @final + def generate(self) -> None: + self._compute_plot_data() + fig = self.fig + self._make_plot(fig) + self._add_table() + self._make_legend() + self._adorn_subplots(fig) + + for ax in self.axes: + self._post_plot_logic_common(ax) + self._post_plot_logic(ax, self.data) + + @final + @staticmethod + def _has_plotted_object(ax: Axes) -> bool: + """check whether ax has data""" + return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 + + @final + def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes: + if not self.on_right(axes_num): + # secondary axes may be passed via ax kw + return self._get_ax_layer(ax) + + if hasattr(ax, "right_ax"): + # if it has right_ax property, ``ax`` must be left axes + return ax.right_ax + elif hasattr(ax, "left_ax"): + # if it has left_ax property, ``ax`` must be right axes + return ax + else: + # otherwise, create twin axes + orig_ax, new_ax = ax, ax.twinx() + # TODO: use Matplotlib public API when available + new_ax._get_lines = orig_ax._get_lines # type: ignore[attr-defined] + # TODO #54485 + new_ax._get_patches_for_fill = ( # type: ignore[attr-defined] + orig_ax._get_patches_for_fill # type: ignore[attr-defined] + ) + # TODO #54485 + orig_ax.right_ax, new_ax.left_ax = ( # type: ignore[attr-defined] + new_ax, + orig_ax, + ) + + if not self._has_plotted_object(orig_ax): # no data on 
left y + orig_ax.get_yaxis().set_visible(False) + + if self.logy is True or self.loglog is True: + new_ax.set_yscale("log") + elif self.logy == "sym" or self.loglog == "sym": + new_ax.set_yscale("symlog") + return new_ax + + @final + @cache_readonly + def fig(self) -> Figure: + return self._axes_and_fig[1] + + @final + @cache_readonly + # TODO: can we annotate this as both a Sequence[Axes] and ndarray[object]? + def axes(self) -> Sequence[Axes]: + return self._axes_and_fig[0] + + @final + @cache_readonly + def _axes_and_fig(self) -> tuple[Sequence[Axes], Figure]: + if self.subplots: + naxes = ( + self.nseries if isinstance(self.subplots, bool) else len(self.subplots) + ) + fig, axes = create_subplots( + naxes=naxes, + sharex=self.sharex, + sharey=self.sharey, + figsize=self.figsize, + ax=self.ax, + layout=self.layout, + layout_type=self._layout_type, + ) + elif self.ax is None: + fig = self.plt.figure(figsize=self.figsize) + axes = fig.add_subplot(111) + else: + fig = self.ax.get_figure() + if self.figsize is not None: + fig.set_size_inches(self.figsize) + axes = self.ax + + axes = flatten_axes(axes) + + if self.logx is True or self.loglog is True: + [a.set_xscale("log") for a in axes] + elif self.logx == "sym" or self.loglog == "sym": + [a.set_xscale("symlog") for a in axes] + + if self.logy is True or self.loglog is True: + [a.set_yscale("log") for a in axes] + elif self.logy == "sym" or self.loglog == "sym": + [a.set_yscale("symlog") for a in axes] + + axes_seq = cast(Sequence["Axes"], axes) + return axes_seq, fig + + @property + def result(self): + """ + Return result axes + """ + if self.subplots: + if self.layout is not None and not is_list_like(self.ax): + # error: "Sequence[Any]" has no attribute "reshape" + return self.axes.reshape(*self.layout) # type: ignore[attr-defined] + else: + return self.axes + else: + sec_true = isinstance(self.secondary_y, bool) and self.secondary_y + # error: Argument 1 to "len" has incompatible type "Union[bool, + # Tuple[Any, 
...], List[Any], ndarray[Any, Any]]"; expected "Sized" + all_sec = ( + is_list_like(self.secondary_y) + and len(self.secondary_y) == self.nseries # type: ignore[arg-type] + ) + if sec_true or all_sec: + # if all data is plotted on secondary, return right axes + return self._get_ax_layer(self.axes[0], primary=False) + else: + return self.axes[0] + + @final + @staticmethod + def _convert_to_ndarray(data): + # GH31357: categorical columns are processed separately + if isinstance(data.dtype, CategoricalDtype): + return data + + # GH32073: cast to float if values contain nulled integers + if (is_integer_dtype(data.dtype) or is_float_dtype(data.dtype)) and isinstance( + data.dtype, ExtensionDtype + ): + return data.to_numpy(dtype="float", na_value=np.nan) + + # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to + # np.ndarray before plot. + if len(data) > 0: + return np.asarray(data) + + return data + + @final + def _ensure_frame(self, data) -> DataFrame: + if isinstance(data, ABCSeries): + label = self.label + if label is None and data.name is None: + label = "" + if label is None: + # We'll end up with columns of [0] instead of [None] + data = data.to_frame() + else: + data = data.to_frame(name=label) + elif self._kind in ("hist", "box"): + cols = self.columns if self.by is None else self.columns + self.by + data = data.loc[:, cols] + return data + + @final + def _compute_plot_data(self) -> None: + data = self.data + + # GH15079 reconstruct data if by is defined + if self.by is not None: + self.subplots = True + data = reconstruct_data_with_by(self.data, by=self.by, cols=self.columns) + + # GH16953, infer_objects is needed as fallback, for ``Series`` + # with ``dtype == object`` + data = data.infer_objects(copy=False) + include_type = [np.number, "datetime", "datetimetz", "timedelta"] + + # GH23719, allow plotting boolean + if self.include_bool is True: + include_type.append(np.bool_) + + # GH22799, exclude datetime-like type for boxplot + exclude_type = 
None + if self._kind == "box": + # TODO: change after solving issue 27881 + include_type = [np.number] + exclude_type = ["timedelta"] + + # GH 18755, include object and category type for scatter plot + if self._kind == "scatter": + include_type.extend(["object", "category", "string"]) + + numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type) + + is_empty = numeric_data.shape[-1] == 0 + # no non-numeric frames or series allowed + if is_empty: + raise TypeError("no numeric data to plot") + + self.data = numeric_data.apply(type(self)._convert_to_ndarray) + + def _make_plot(self, fig: Figure) -> None: + raise AbstractMethodError(self) + + @final + def _add_table(self) -> None: + if self.table is False: + return + elif self.table is True: + data = self.data.transpose() + else: + data = self.table + ax = self._get_ax(0) + tools.table(ax, data) + + @final + def _post_plot_logic_common(self, ax: Axes) -> None: + """Common post process for each axes""" + if self.orientation == "vertical" or self.orientation is None: + type(self)._apply_axis_properties( + ax.xaxis, rot=self.rot, fontsize=self.fontsize + ) + type(self)._apply_axis_properties(ax.yaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + type(self)._apply_axis_properties( + ax.right_ax.yaxis, fontsize=self.fontsize + ) + + elif self.orientation == "horizontal": + type(self)._apply_axis_properties( + ax.yaxis, rot=self.rot, fontsize=self.fontsize + ) + type(self)._apply_axis_properties(ax.xaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + type(self)._apply_axis_properties( + ax.right_ax.yaxis, fontsize=self.fontsize + ) + else: # pragma no cover + raise ValueError + + @abstractmethod + def _post_plot_logic(self, ax: Axes, data) -> None: + """Post process for each axes. 
Overridden in child classes""" + + @final + def _adorn_subplots(self, fig: Figure) -> None: + """Common post process unrelated to data""" + if len(self.axes) > 0: + all_axes = self._get_subplots(fig) + nrows, ncols = self._get_axes_layout(fig) + handle_shared_axes( + axarr=all_axes, + nplots=len(all_axes), + naxes=nrows * ncols, + nrows=nrows, + ncols=ncols, + sharex=self.sharex, + sharey=self.sharey, + ) + + for ax in self.axes: + ax = getattr(ax, "right_ax", ax) + if self.yticks is not None: + ax.set_yticks(self.yticks) + + if self.xticks is not None: + ax.set_xticks(self.xticks) + + if self.ylim is not None: + ax.set_ylim(self.ylim) + + if self.xlim is not None: + ax.set_xlim(self.xlim) + + # GH9093, currently Pandas does not show ylabel, so if users provide + # ylabel will set it as ylabel in the plot. + if self.ylabel is not None: + ax.set_ylabel(pprint_thing(self.ylabel)) + + ax.grid(self.grid) + + if self.title: + if self.subplots: + if is_list_like(self.title): + if len(self.title) != self.nseries: + raise ValueError( + "The length of `title` must equal the number " + "of columns if using `title` of type `list` " + "and `subplots=True`.\n" + f"length of title = {len(self.title)}\n" + f"number of columns = {self.nseries}" + ) + + for ax, title in zip(self.axes, self.title): + ax.set_title(title) + else: + fig.suptitle(self.title) + else: + if is_list_like(self.title): + msg = ( + "Using `title` of type `list` is not supported " + "unless `subplots=True` is passed" + ) + raise ValueError(msg) + self.axes[0].set_title(self.title) + + @final + @staticmethod + def _apply_axis_properties( + axis: Axis, rot=None, fontsize: int | None = None + ) -> None: + """ + Tick creation within matplotlib is reasonably expensive and is + internally deferred until accessed as Ticks are created/destroyed + multiple times per draw. It's therefore beneficial for us to avoid + accessing unless we will act on the Tick. 
+ """ + if rot is not None or fontsize is not None: + # rot=0 is a valid setting, hence the explicit None check + labels = axis.get_majorticklabels() + axis.get_minorticklabels() + for label in labels: + if rot is not None: + label.set_rotation(rot) + if fontsize is not None: + label.set_fontsize(fontsize) + + @final + @property + def legend_title(self) -> str | None: + if not isinstance(self.data.columns, ABCMultiIndex): + name = self.data.columns.name + if name is not None: + name = pprint_thing(name) + return name + else: + stringified = map(pprint_thing, self.data.columns.names) + return ",".join(stringified) + + @final + def _mark_right_label(self, label: str, index: int) -> str: + """ + Append ``(right)`` to the label of a line if it's plotted on the right axis. + + Note that ``(right)`` is only appended when ``subplots=False``. + """ + if not self.subplots and self.mark_right and self.on_right(index): + label += " (right)" + return label + + @final + def _append_legend_handles_labels(self, handle: Artist, label: str) -> None: + """ + Append current handle and label to ``legend_handles`` and ``legend_labels``. + + These will be used to make the legend. 
+ """ + self.legend_handles.append(handle) + self.legend_labels.append(label) + + def _make_legend(self) -> None: + ax, leg = self._get_ax_legend(self.axes[0]) + + handles = [] + labels = [] + title = "" + + if not self.subplots: + if leg is not None: + title = leg.get_title().get_text() + # Replace leg.legend_handles because it misses marker info + if Version(mpl.__version__) < Version("3.7"): + handles = leg.legendHandles + else: + handles = leg.legend_handles + labels = [x.get_text() for x in leg.get_texts()] + + if self.legend: + if self.legend == "reverse": + handles += reversed(self.legend_handles) + labels += reversed(self.legend_labels) + else: + handles += self.legend_handles + labels += self.legend_labels + + if self.legend_title is not None: + title = self.legend_title + + if len(handles) > 0: + ax.legend(handles, labels, loc="best", title=title) + + elif self.subplots and self.legend: + for ax in self.axes: + if ax.get_visible(): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "No artists with labels found to put in legend.", + UserWarning, + ) + ax.legend(loc="best") + + @final + @staticmethod + def _get_ax_legend(ax: Axes): + """ + Take in axes and return ax and legend under different scenarios + """ + leg = ax.get_legend() + + other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None) + other_leg = None + if other_ax is not None: + other_leg = other_ax.get_legend() + if leg is None and other_leg is not None: + leg = other_leg + ax = other_ax + return ax, leg + + @final + @cache_readonly + def plt(self): + import matplotlib.pyplot as plt + + return plt + + _need_to_set_index = False + + @final + def _get_xticks(self): + index = self.data.index + is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time") + + # TODO: be stricter about x? 
+ x: list[int] | np.ndarray + if self.use_index: + if isinstance(index, ABCPeriodIndex): + # test_mixed_freq_irreg_period + x = index.to_timestamp()._mpl_repr() + # TODO: why do we need to do to_timestamp() here but not other + # places where we call mpl_repr? + elif is_any_real_numeric_dtype(index.dtype): + # Matplotlib supports numeric values or datetime objects as + # xaxis values. Taking LBYL approach here, by the time + # matplotlib raises exception when using non numeric/datetime + # values for xaxis, several actions are already taken by plt. + x = index._mpl_repr() + elif isinstance(index, ABCDatetimeIndex) or is_datetype: + x = index._mpl_repr() + else: + self._need_to_set_index = True + x = list(range(len(index))) + else: + x = list(range(len(index))) + + return x + + @classmethod + @register_pandas_matplotlib_converters + def _plot( + cls, ax: Axes, x, y: np.ndarray, style=None, is_errorbar: bool = False, **kwds + ): + mask = isna(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if isinstance(x, ABCIndex): + x = x._mpl_repr() + + if is_errorbar: + if "xerr" in kwds: + kwds["xerr"] = np.array(kwds.get("xerr")) + if "yerr" in kwds: + kwds["yerr"] = np.array(kwds.get("yerr")) + return ax.errorbar(x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is unsupported + args = (x, y, style) if style is not None else (x, y) + return ax.plot(*args, **kwds) + + def _get_custom_index_name(self): + """Specify whether xlabel/ylabel should be used to override index name""" + return self.xlabel + + @final + def _get_index_name(self) -> str | None: + if isinstance(self.data.index, ABCMultiIndex): + name = self.data.index.names + if com.any_not_none(*name): + name = ",".join([pprint_thing(x) for x in name]) + else: + name = None + else: + name = self.data.index.name + if name is not None: + name = pprint_thing(name) + + # GH 45145, override the default axis label if one is provided. 
+ index_name = self._get_custom_index_name() + if index_name is not None: + name = pprint_thing(index_name) + + return name + + @final + @classmethod + def _get_ax_layer(cls, ax, primary: bool = True): + """get left (primary) or right (secondary) axes""" + if primary: + return getattr(ax, "left_ax", ax) + else: + return getattr(ax, "right_ax", ax) + + @final + def _col_idx_to_axis_idx(self, col_idx: int) -> int: + """Return the index of the axis where the column at col_idx should be plotted""" + if isinstance(self.subplots, list): + # Subplots is a list: some columns will be grouped together in the same ax + return next( + group_idx + for (group_idx, group) in enumerate(self.subplots) + if col_idx in group + ) + else: + # subplots is True: one ax per column + return col_idx + + @final + def _get_ax(self, i: int): + # get the twinx ax if appropriate + if self.subplots: + i = self._col_idx_to_axis_idx(i) + ax = self.axes[i] + ax = self._maybe_right_yaxis(ax, i) + # error: Unsupported target for indexed assignment ("Sequence[Any]") + self.axes[i] = ax # type: ignore[index] + else: + ax = self.axes[0] + ax = self._maybe_right_yaxis(ax, i) + + ax.get_yaxis().set_visible(True) + return ax + + @final + def on_right(self, i: int): + if isinstance(self.secondary_y, bool): + return self.secondary_y + + if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)): + return self.data.columns[i] in self.secondary_y + + @final + def _apply_style_colors( + self, colors, kwds: dict[str, Any], col_num: int, label: str + ): + """ + Manage style and color based on column number and its label. + Returns tuple of appropriate style and kwds which "color" may be added. 
+ """ + style = None + if self.style is not None: + if isinstance(self.style, list): + try: + style = self.style[col_num] + except IndexError: + pass + elif isinstance(self.style, dict): + style = self.style.get(label, style) + else: + style = self.style + + has_color = "color" in kwds or self.colormap is not None + nocolor_style = style is None or not _color_in_style(style) + if (has_color or self.subplots) and nocolor_style: + if isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[col_num % len(colors)] + return style, kwds + + def _get_colors( + self, + num_colors: int | None = None, + color_kwds: str = "color", + ): + if num_colors is None: + num_colors = self.nseries + if color_kwds == "color": + color = self.color + else: + color = self.kwds.get(color_kwds) + return get_standard_colors( + num_colors=num_colors, + colormap=self.colormap, + color=color, + ) + + # TODO: tighter typing for first return? + @final + @staticmethod + def _parse_errorbars( + label: str, err, data: NDFrameT, nseries: int + ) -> tuple[Any, NDFrameT]: + """ + Look for error keyword arguments and return the actual errorbar data + or return the error DataFrame/dict + + Error bars can be specified in several ways: + Series: the user provides a pandas.Series object of the same + length as the data + ndarray: provides a np.ndarray of the same length as the data + DataFrame/dict: error values are paired with keys matching the + key in the plotted DataFrame + str: the name of the column within the plotted DataFrame + + Asymmetrical error bars are also supported, however raw error values + must be provided in this case. For a ``N`` length :class:`Series`, a + ``2xN`` array should be provided indicating lower and upper (or left + and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors + should be in a ``Mx2xN`` array. 
+ """ + if err is None: + return None, data + + def match_labels(data, e): + e = e.reindex(data.index) + return e + + # key-matched DataFrame + if isinstance(err, ABCDataFrame): + err = match_labels(data, err) + # key-matched dict + elif isinstance(err, dict): + pass + + # Series of error values + elif isinstance(err, ABCSeries): + # broadcast error series across data + err = match_labels(data, err) + err = np.atleast_2d(err) + err = np.tile(err, (nseries, 1)) + + # errors are a column in the dataframe + elif isinstance(err, str): + evalues = data[err].values + data = data[data.columns.drop(err)] + err = np.atleast_2d(evalues) + err = np.tile(err, (nseries, 1)) + + elif is_list_like(err): + if is_iterator(err): + err = np.atleast_2d(list(err)) + else: + # raw error values + err = np.atleast_2d(err) + + err_shape = err.shape + + # asymmetrical error bars + if isinstance(data, ABCSeries) and err_shape[0] == 2: + err = np.expand_dims(err, 0) + err_shape = err.shape + if err_shape[2] != len(data): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape (2, {len(data)})" + ) + elif isinstance(data, ABCDataFrame) and err.ndim == 3: + if ( + (err_shape[0] != nseries) + or (err_shape[1] != 2) + or (err_shape[2] != len(data)) + ): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape ({nseries}, 2, {len(data)})" + ) + + # broadcast errors to each data series + if len(err) == 1: + err = np.tile(err, (nseries, 1)) + + elif is_number(err): + err = np.tile( + [err], + (nseries, len(data)), + ) + + else: + msg = f"No valid {label} detected" + raise ValueError(msg) + + return err, data + + @final + def _get_errorbars( + self, label=None, index=None, xerr: bool = True, yerr: bool = True + ) -> dict[str, Any]: + errors = {} + + for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]): + if flag: + err = self.errors[kw] + # user provided label-matched dataframe of errors + if isinstance(err, (ABCDataFrame, dict)): + if label 
is not None and label in err.keys(): + err = err[label] + else: + err = None + elif index is not None and err is not None: + err = err[index] + + if err is not None: + errors[kw] = err + return errors + + @final + def _get_subplots(self, fig: Figure): + if Version(mpl.__version__) < Version("3.8"): + from matplotlib.axes import Subplot as Klass + else: + from matplotlib.axes import Axes as Klass + + return [ + ax + for ax in fig.get_axes() + if (isinstance(ax, Klass) and ax.get_subplotspec() is not None) + ] + + @final + def _get_axes_layout(self, fig: Figure) -> tuple[int, int]: + axes = self._get_subplots(fig) + x_set = set() + y_set = set() + for ax in axes: + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + return (len(y_set), len(x_set)) + + +class PlanePlot(MPLPlot, ABC): + """ + Abstract class for plotting on plane, currently scatter and hexbin. + """ + + _layout_type = "single" + + def __init__(self, data, x, y, **kwargs) -> None: + MPLPlot.__init__(self, data, **kwargs) + if x is None or y is None: + raise ValueError(self._kind + " requires an x and y column") + if is_integer(x) and not self.data.columns._holds_integer(): + x = self.data.columns[x] + if is_integer(y) and not self.data.columns._holds_integer(): + y = self.data.columns[y] + + self.x = x + self.y = y + + @final + def _get_nseries(self, data: Series | DataFrame) -> int: + return 1 + + @final + def _post_plot_logic(self, ax: Axes, data) -> None: + x, y = self.x, self.y + xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x) + ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible + # type "Hashable"; expected "str" + ax.set_xlabel(xlabel) # type: ignore[arg-type] + ax.set_ylabel(ylabel) # type: ignore[arg-type] + + @final + def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds): + # Addresses issues 
#10611 and #10678: + # When plotting scatterplots and hexbinplots in IPython + # inline backend the colorbar axis height tends not to + # exactly match the parent axis height. + # The difference is due to small fractional differences + # in floating points with similar representation. + # To deal with this, this method forces the colorbar + # height to take the height of the parent axes. + # For a more detailed description of the issue + # see the following link: + # https://github.com/ipython/ipython/issues/11215 + + # GH33389, if ax is used multiple times, we should always + # use the last one which contains the latest information + # about the ax + img = ax.collections[-1] + return fig.colorbar(img, ax=ax, **kwds) + + +class ScatterPlot(PlanePlot): + @property + def _kind(self) -> Literal["scatter"]: + return "scatter" + + def __init__( + self, + data, + x, + y, + s=None, + c=None, + *, + colorbar: bool | lib.NoDefault = lib.no_default, + norm=None, + **kwargs, + ) -> None: + if s is None: + # hide the matplotlib default for size, in case we want to change + # the handling of this argument later + s = 20 + elif is_hashable(s) and s in data.columns: + s = data[s] + self.s = s + + self.colorbar = colorbar + self.norm = norm + + super().__init__(data, x, y, **kwargs) + if is_integer(c) and not self.data.columns._holds_integer(): + c = self.data.columns[c] + self.c = c + + def _make_plot(self, fig: Figure) -> None: + x, y, c, data = self.x, self.y, self.c, self.data + ax = self.axes[0] + + c_is_column = is_hashable(c) and c in self.data.columns + + color_by_categorical = c_is_column and isinstance( + self.data[c].dtype, CategoricalDtype + ) + + color = self.color + c_values = self._get_c_values(color, color_by_categorical, c_is_column) + norm, cmap = self._get_norm_and_cmap(c_values, color_by_categorical) + cb = self._get_colorbar(c_values, c_is_column) + + if self.legend: + label = self.label + else: + label = None + scatter = ax.scatter( + data[x].values, + 
data[y].values, + c=c_values, + label=label, + cmap=cmap, + norm=norm, + s=self.s, + **self.kwds, + ) + if cb: + cbar_label = c if c_is_column else "" + cbar = self._plot_colorbar(ax, fig=fig, label=cbar_label) + if color_by_categorical: + n_cats = len(self.data[c].cat.categories) + cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats)) + cbar.ax.set_yticklabels(self.data[c].cat.categories) + + if label is not None: + self._append_legend_handles_labels( + # error: Argument 2 to "_append_legend_handles_labels" of + # "MPLPlot" has incompatible type "Hashable"; expected "str" + scatter, + label, # type: ignore[arg-type] + ) + + errors_x = self._get_errorbars(label=x, index=0, yerr=False) + errors_y = self._get_errorbars(label=y, index=0, xerr=False) + if len(errors_x) > 0 or len(errors_y) > 0: + err_kwds = dict(errors_x, **errors_y) + err_kwds["ecolor"] = scatter.get_facecolor()[0] + ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds) + + def _get_c_values(self, color, color_by_categorical: bool, c_is_column: bool): + c = self.c + if c is not None and color is not None: + raise TypeError("Specify exactly one of `c` and `color`") + if c is None and color is None: + c_values = self.plt.rcParams["patch.facecolor"] + elif color is not None: + c_values = color + elif color_by_categorical: + c_values = self.data[c].cat.codes + elif c_is_column: + c_values = self.data[c].values + else: + c_values = c + return c_values + + def _get_norm_and_cmap(self, c_values, color_by_categorical: bool): + c = self.c + if self.colormap is not None: + cmap = mpl.colormaps.get_cmap(self.colormap) + # cmap is only used if c_values are integers, otherwise UserWarning. + # GH-53908: additionally call isinstance() because is_integer_dtype + # returns True for "b" (meaning "blue" and not int8 in this context) + elif not isinstance(c_values, str) and is_integer_dtype(c_values): + # pandas uses colormap, matplotlib uses cmap. 
+ cmap = mpl.colormaps["Greys"] + else: + cmap = None + + if color_by_categorical and cmap is not None: + from matplotlib import colors + + n_cats = len(self.data[c].cat.categories) + cmap = colors.ListedColormap([cmap(i) for i in range(cmap.N)]) + bounds = np.linspace(0, n_cats, n_cats + 1) + norm = colors.BoundaryNorm(bounds, cmap.N) + # TODO: warn that we are ignoring self.norm if user specified it? + # Doesn't happen in any tests 2023-11-09 + else: + norm = self.norm + return norm, cmap + + def _get_colorbar(self, c_values, c_is_column: bool) -> bool: + # plot colorbar if + # 1. colormap is assigned, and + # 2.`c` is a column containing only numeric values + plot_colorbar = self.colormap or c_is_column + cb = self.colorbar + if cb is lib.no_default: + return is_numeric_dtype(c_values) and plot_colorbar + return cb + + +class HexBinPlot(PlanePlot): + @property + def _kind(self) -> Literal["hexbin"]: + return "hexbin" + + def __init__(self, data, x, y, C=None, *, colorbar: bool = True, **kwargs) -> None: + super().__init__(data, x, y, **kwargs) + if is_integer(C) and not self.data.columns._holds_integer(): + C = self.data.columns[C] + self.C = C + + self.colorbar = colorbar + + # Scatter plot allows to plot objects data + if len(self.data[self.x]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires x column to be numeric") + if len(self.data[self.y]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires y column to be numeric") + + def _make_plot(self, fig: Figure) -> None: + x, y, data, C = self.x, self.y, self.data, self.C + ax = self.axes[0] + # pandas uses colormap, matplotlib uses cmap. 
+ cmap = self.colormap or "BuGn" + cmap = mpl.colormaps.get_cmap(cmap) + cb = self.colorbar + + if C is None: + c_values = None + else: + c_values = data[C].values + + ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds) + if cb: + self._plot_colorbar(ax, fig=fig) + + def _make_legend(self) -> None: + pass + + +class LinePlot(MPLPlot): + _default_rot = 0 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + @property + def _kind(self) -> Literal["line", "area", "hist", "kde", "box"]: + return "line" + + def __init__(self, data, **kwargs) -> None: + from pandas.plotting import plot_params + + MPLPlot.__init__(self, data, **kwargs) + if self.stacked: + self.data = self.data.fillna(value=0) + self.x_compat = plot_params["x_compat"] + if "x_compat" in self.kwds: + self.x_compat = bool(self.kwds.pop("x_compat")) + + @final + def _is_ts_plot(self) -> bool: + # this is slightly deceptive + return not self.x_compat and self.use_index and self._use_dynamic_x() + + @final + def _use_dynamic_x(self) -> bool: + return use_dynamic_x(self._get_ax(0), self.data) + + def _make_plot(self, fig: Figure) -> None: + if self._is_ts_plot(): + data = maybe_convert_index(self._get_ax(0), self.data) + + x = data.index # dummy, not used + plotf = self._ts_plot + it = data.items() + else: + x = self._get_xticks() + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, Any, Any, Any, Any, Any, KwArg(Any)], Any]", variable has + # type "Callable[[Any, Any, Any, Any, KwArg(Any)], Any]") + plotf = self._plot # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type + # "Iterator[tuple[Hashable, ndarray[Any, Any]]]", variable has + # type "Iterable[tuple[Hashable, Series]]") + it = self._iter_data(data=self.data) # type: ignore[assignment] + + stacking_id = self._get_stacking_id() + is_errorbar = com.any_not_none(*self.errors.values()) + + colors = self._get_colors() + for i, 
(label, y) in enumerate(it): + ax = self._get_ax(i) + kwds = self.kwds.copy() + if self.color is not None: + kwds["color"] = self.color + style, kwds = self._apply_style_colors( + colors, + kwds, + i, + # error: Argument 4 to "_apply_style_colors" of "MPLPlot" has + # incompatible type "Hashable"; expected "str" + label, # type: ignore[arg-type] + ) + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + kwds["label"] = label + + newlines = plotf( + ax, + x, + y, + style=style, + column_num=i, + stacking_id=stacking_id, + is_errorbar=is_errorbar, + **kwds, + ) + self._append_legend_handles_labels(newlines[0], label) + + if self._is_ts_plot(): + # reset of xlim should be used for ts data + # TODO: GH28021, should find a way to change view limit on xaxis + lines = get_all_lines(ax) + left, right = get_xlim(lines) + ax.set_xlim(left, right) + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + style=None, + column_num=None, + stacking_id=None, + **kwds, + ): + # column_num is used to get the target column from plotf in line and + # area plots + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds) + cls._update_stacker(ax, stacking_id, y) + return lines + + @final + def _ts_plot(self, ax: Axes, x, data: Series, style=None, **kwds): + # accept x to be consistent with normal plot func, + # x is not passed to tsplot as it uses data.index as x coordinate + # column_num must be in kwds for stacking purpose + freq, data = maybe_resample(data, ax, kwds) + + # Set ax with freq info + decorate_axes(ax, freq) + # digging deeper + if hasattr(ax, "left_ax"): + decorate_axes(ax.left_ax, freq) + if hasattr(ax, 
"right_ax"): + decorate_axes(ax.right_ax, freq) + # TODO #54485 + ax._plot_data.append((data, self._kind, kwds)) # type: ignore[attr-defined] + + lines = self._plot(ax, data.index, np.asarray(data.values), style=style, **kwds) + # set date formatter, locators and rescale limits + # TODO #54485 + format_dateaxis(ax, ax.freq, data.index) # type: ignore[arg-type, attr-defined] + return lines + + @final + def _get_stacking_id(self) -> int | None: + if self.stacked: + return id(self.data) + else: + return None + + @final + @classmethod + def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None: + if stacking_id is None: + return + if not hasattr(ax, "_stacker_pos_prior"): + # TODO #54485 + ax._stacker_pos_prior = {} # type: ignore[attr-defined] + if not hasattr(ax, "_stacker_neg_prior"): + # TODO #54485 + ax._stacker_neg_prior = {} # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_pos_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_neg_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] + + @final + @classmethod + def _get_stacked_values( + cls, ax: Axes, stacking_id: int | None, values: np.ndarray, label + ) -> np.ndarray: + if stacking_id is None: + return values + if not hasattr(ax, "_stacker_pos_prior"): + # stacker may not be initialized for subplots + cls._initialize_stacker(ax, stacking_id, len(values)) + + if (values >= 0).all(): + # TODO #54485 + return ( + ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] + + values + ) + elif (values <= 0).all(): + # TODO #54485 + return ( + ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] + + values + ) + + raise ValueError( + "When stacked is True, each column must be either " + "all positive or all negative. 
" + f"Column '{label}' contains both positive and negative values" + ) + + @final + @classmethod + def _update_stacker(cls, ax: Axes, stacking_id: int | None, values) -> None: + if stacking_id is None: + return + if (values >= 0).all(): + # TODO #54485 + ax._stacker_pos_prior[stacking_id] += values # type: ignore[attr-defined] + elif (values <= 0).all(): + # TODO #54485 + ax._stacker_neg_prior[stacking_id] += values # type: ignore[attr-defined] + + def _post_plot_logic(self, ax: Axes, data) -> None: + from matplotlib.ticker import FixedLocator + + def get_label(i): + if is_float(i) and i.is_integer(): + i = int(i) + try: + return pprint_thing(data.index[i]) + except Exception: + return "" + + if self._need_to_set_index: + xticks = ax.get_xticks() + xticklabels = [get_label(x) for x in xticks] + # error: Argument 1 to "FixedLocator" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[float]" + ax.xaxis.set_major_locator(FixedLocator(xticks)) # type: ignore[arg-type] + ax.set_xticklabels(xticklabels) + + # If the index is an irregular time series, then by default + # we rotate the tick labels. The exception is if there are + # subplots which don't share their x-axes, in which we case + # we don't rotate the ticklabels as by default the subplots + # would be too close together. + condition = ( + not self._use_dynamic_x() + and (data.index._is_all_dates and self.use_index) + and (not self.subplots or (self.subplots and self.sharex)) + ) + + index_name = self._get_index_name() + + if condition: + # irregular TS rotated 30 deg. by default + # probably a better place to check / set this. 
+ if not self._rot_set: + self.rot = 30 + format_date_labels(ax, rot=self.rot) + + if index_name is not None and self.use_index: + ax.set_xlabel(index_name) + + +class AreaPlot(LinePlot): + @property + def _kind(self) -> Literal["area"]: + return "area" + + def __init__(self, data, **kwargs) -> None: + kwargs.setdefault("stacked", True) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + data = data.fillna(value=0) + LinePlot.__init__(self, data, **kwargs) + + if not self.stacked: + # use smaller alpha to distinguish overlap + self.kwds.setdefault("alpha", 0.5) + + if self.logy or self.loglog: + raise ValueError("Log-y scales are not supported in area plot") + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + style=None, + column_num=None, + stacking_id=None, + is_errorbar: bool = False, + **kwds, + ): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + + # need to remove label, because subplots uses mpl legend as it is + line_kwds = kwds.copy() + line_kwds.pop("label") + lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) + + # get data from the line to get coordinates for fill_between + xdata, y_values = lines[0].get_data(orig=False) + + # unable to use ``_get_stacked_values`` here to get starting point + if stacking_id is None: + start = np.zeros(len(y)) + elif (y >= 0).all(): + # TODO #54485 + start = ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] + elif (y <= 0).all(): + # TODO #54485 + start = ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] + else: + start = np.zeros(len(y)) + + if "color" not in kwds: + kwds["color"] = lines[0].get_color() + + rect = ax.fill_between(xdata, start, y_values, **kwds) + 
cls._update_stacker(ax, stacking_id, y) + + # LinePlot expects list of artists + res = [rect] + return res + + def _post_plot_logic(self, ax: Axes, data) -> None: + LinePlot._post_plot_logic(self, ax, data) + + is_shared_y = len(list(ax.get_shared_y_axes())) > 0 + # do not override the default axis behaviour in case of shared y axes + if self.ylim is None and not is_shared_y: + if (data >= 0).all().all(): + ax.set_ylim(0, None) + elif (data <= 0).all().all(): + ax.set_ylim(None, 0) + + +class BarPlot(MPLPlot): + @property + def _kind(self) -> Literal["bar", "barh"]: + return "bar" + + _default_rot = 90 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + def __init__( + self, + data, + *, + align="center", + bottom=0, + left=0, + width=0.5, + position=0.5, + log=False, + **kwargs, + ) -> None: + # we have to treat a series differently than a + # 1-column DataFrame w.r.t. color handling + self._is_series = isinstance(data, ABCSeries) + self.bar_width = width + self._align = align + self._position = position + self.tick_pos = np.arange(len(data)) + + if is_list_like(bottom): + bottom = np.array(bottom) + if is_list_like(left): + left = np.array(left) + self.bottom = bottom + self.left = left + + self.log = log + + MPLPlot.__init__(self, data, **kwargs) + + @cache_readonly + def ax_pos(self) -> np.ndarray: + return self.tick_pos - self.tickoffset + + @cache_readonly + def tickoffset(self): + if self.stacked or self.subplots: + return self.bar_width * self._position + elif self._align == "edge": + w = self.bar_width / self.nseries + return self.bar_width * (self._position - 0.5) + w * 0.5 + else: + return self.bar_width * self._position + + @cache_readonly + def lim_offset(self): + if self.stacked or self.subplots: + if self._align == "edge": + return self.bar_width / 2 + else: + return 0 + elif self._align == "edge": + w = self.bar_width / self.nseries + return w * 0.5 + else: + return 0 + + # error: Signature of "_plot" incompatible 
with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.bar(x, y, w, bottom=start, log=log, **kwds) + + @property + def _start_base(self): + return self.bottom + + def _make_plot(self, fig: Figure) -> None: + colors = self._get_colors() + ncolors = len(colors) + + pos_prior = neg_prior = np.zeros(len(self.data)) + K = self.nseries + + data = self.data.fillna(0) + for i, (label, y) in enumerate(self._iter_data(data=data)): + ax = self._get_ax(i) + kwds = self.kwds.copy() + if self._is_series: + kwds["color"] = colors + elif isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[i % ncolors] + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + + if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): + kwds["ecolor"] = mpl.rcParams["xtick.color"] + + start = 0 + if self.log and (y >= 1).all(): + start = 1 + start = start + self._start_base + + kwds["align"] = self._align + if self.subplots: + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + ax.set_title(label) + elif self.stacked: + mask = y > 0 + start = np.where(mask, pos_prior, neg_prior) + self._start_base + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + pos_prior = pos_prior + np.where(mask, y, 0) + neg_prior = neg_prior + np.where(mask, 0, y) + else: + w = self.bar_width / K + rect = self._plot( + ax, + self.ax_pos + (i + 0.5) * w, + y, + w, + start=start, + label=label, + log=self.log, + **kwds, + ) + self._append_legend_handles_labels(rect, label) + + def _post_plot_logic(self, 
ax: Axes, data) -> None: + if self.use_index: + str_index = [pprint_thing(key) for key in data.index] + else: + str_index = [pprint_thing(key) for key in range(data.shape[0])] + + s_edge = self.ax_pos[0] - 0.25 + self.lim_offset + e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset + + self._decorate_ticks(ax, self._get_index_name(), str_index, s_edge, e_edge) + + def _decorate_ticks( + self, + ax: Axes, + name: str | None, + ticklabels: list[str], + start_edge: float, + end_edge: float, + ) -> None: + ax.set_xlim((start_edge, end_edge)) + + if self.xticks is not None: + ax.set_xticks(np.array(self.xticks)) + else: + ax.set_xticks(self.tick_pos) + ax.set_xticklabels(ticklabels) + + if name is not None and self.use_index: + ax.set_xlabel(name) + + +class BarhPlot(BarPlot): + @property + def _kind(self) -> Literal["barh"]: + return "barh" + + _default_rot = 0 + + @property + def orientation(self) -> Literal["horizontal"]: + return "horizontal" + + @property + def _start_base(self): + return self.left + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.barh(x, y, w, left=start, log=log, **kwds) + + def _get_custom_index_name(self): + return self.ylabel + + def _decorate_ticks( + self, + ax: Axes, + name: str | None, + ticklabels: list[str], + start_edge: float, + end_edge: float, + ) -> None: + # horizontal bars + ax.set_ylim((start_edge, end_edge)) + ax.set_yticks(self.tick_pos) + ax.set_yticklabels(ticklabels) + if name is not None and self.use_index: + ax.set_ylabel(name) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible type + # "Hashable | None"; expected "str" + ax.set_xlabel(self.xlabel) # type: ignore[arg-type] + + +class PiePlot(MPLPlot): + @property + def _kind(self) -> Literal["pie"]: + return "pie" + + _layout_type = 
"horizontal" + + def __init__(self, data, kind=None, **kwargs) -> None: + data = data.fillna(value=0) + if (data < 0).any().any(): + raise ValueError(f"{self._kind} plot doesn't allow negative values") + MPLPlot.__init__(self, data, kind=kind, **kwargs) + + @classmethod + def _validate_log_kwd( + cls, + kwd: str, + value: bool | None | Literal["sym"], + ) -> bool | None | Literal["sym"]: + super()._validate_log_kwd(kwd=kwd, value=value) + if value is not False: + warnings.warn( + f"PiePlot ignores the '{kwd}' keyword", + UserWarning, + stacklevel=find_stack_level(), + ) + return False + + def _validate_color_args(self, color, colormap) -> None: + # TODO: warn if color is passed and ignored? + return None + + def _make_plot(self, fig: Figure) -> None: + colors = self._get_colors(num_colors=len(self.data), color_kwds="colors") + self.kwds.setdefault("colors", colors) + + for i, (label, y) in enumerate(self._iter_data(data=self.data)): + ax = self._get_ax(i) + if label is not None: + label = pprint_thing(label) + ax.set_ylabel(label) + + kwds = self.kwds.copy() + + def blank_labeler(label, value): + if value == 0: + return "" + else: + return label + + idx = [pprint_thing(v) for v in self.data.index] + labels = kwds.pop("labels", idx) + # labels is used for each wedge's labels + # Blank out labels for values of 0 so they don't overlap + # with nonzero wedges + if labels is not None: + blabels = [blank_labeler(left, value) for left, value in zip(labels, y)] + else: + blabels = None + results = ax.pie(y, labels=blabels, **kwds) + + if kwds.get("autopct", None) is not None: + patches, texts, autotexts = results + else: + patches, texts = results + autotexts = [] + + if self.fontsize is not None: + for t in texts + autotexts: + t.set_fontsize(self.fontsize) + + # leglabels is used for legend labels + leglabels = labels if labels is not None else idx + for _patch, _leglabel in zip(patches, leglabels): + self._append_legend_handles_labels(_patch, _leglabel) + + def 
_post_plot_logic(self, ax: Axes, data) -> None: + pass diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb66065a8039c63b7181619aea3aa74277da4a5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.missing import remove_na_arraylike + +from pandas import ( + MultiIndex, + concat, +) + +from pandas.plotting._matplotlib.misc import unpack_single_str_list + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import IndexLabel + + from pandas import ( + DataFrame, + Series, + ) + + +def create_iter_data_given_by( + data: DataFrame, kind: str = "hist" +) -> dict[Hashable, DataFrame | Series]: + """ + Create data for iteration given `by` is assigned or not, and it is only + used in both hist and boxplot. + + If `by` is assigned, return a dictionary of DataFrames in which the key of + dictionary is the values in groups. + If `by` is not assigned, return input as is, and this preserves current + status of iter_data. + + Parameters + ---------- + data : reformatted grouped data from `_compute_plot_data` method. + kind : str, plot kind. This function is only used for `hist` and `box` plots. + + Returns + ------- + iter_data : DataFrame or Dictionary of DataFrames + + Examples + -------- + If `by` is assigned: + + >>> import numpy as np + >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')] + >>> mi = pd.MultiIndex.from_tuples(tuples) + >>> value = [[1, 3, np.nan, np.nan], + ... 
[3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] + >>> data = pd.DataFrame(value, columns=mi) + >>> create_iter_data_given_by(data) + {'h1': h1 + a b + 0 1.0 3.0 + 1 3.0 4.0 + 2 NaN NaN, 'h2': h2 + a b + 0 NaN NaN + 1 NaN NaN + 2 5.0 6.0} + """ + + # For `hist` plot, before transformation, the values in level 0 are values + # in groups and subplot titles, and later used for column subselection and + # iteration; For `box` plot, values in level 1 are column names to show, + # and are used for iteration and as subplots titles. + if kind == "hist": + level = 0 + else: + level = 1 + + # Select sub-columns based on the value of level of MI, and if `by` is + # assigned, data must be a MI DataFrame + assert isinstance(data.columns, MultiIndex) + return { + col: data.loc[:, data.columns.get_level_values(level) == col] + for col in data.columns.levels[level] + } + + +def reconstruct_data_with_by( + data: DataFrame, by: IndexLabel, cols: IndexLabel +) -> DataFrame: + """ + Internal function to group data, and reassign multiindex column names onto the + result in order to let grouped data be used in _compute_plot_data method. + + Parameters + ---------- + data : Original DataFrame to plot + by : grouped `by` parameter selected by users + cols : columns of data set (excluding columns used in `by`) + + Returns + ------- + Output is the reconstructed DataFrame with MultiIndex columns. The first level + of MI is unique values of groups, and second level of MI is the columns + selected by users. 
+ + Examples + -------- + >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]} + >>> df = pd.DataFrame(d) + >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b']) + h1 h2 + a b a b + 0 1.0 3.0 NaN NaN + 1 3.0 4.0 NaN NaN + 2 NaN NaN 5.0 6.0 + """ + by_modified = unpack_single_str_list(by) + grouped = data.groupby(by_modified) + + data_list = [] + for key, group in grouped: + # error: List item 1 has incompatible type "Union[Hashable, + # Sequence[Hashable]]"; expected "Iterable[Hashable]" + columns = MultiIndex.from_product([[key], cols]) # type: ignore[list-item] + sub_group = group[cols] + sub_group.columns = columns + data_list.append(sub_group) + + data = concat(data_list, axis=1) + return data + + +def reformat_hist_y_given_by(y: np.ndarray, by: IndexLabel | None) -> np.ndarray: + """Internal function to reformat y given `by` is applied or not for hist plot. + + If by is None, input y is 1-d with NaN removed; and if by is not None, groupby + will take place and input y is multi-dimensional array. 
+ """ + if by is not None and len(y.shape) > 1: + return np.array([remove_na_arraylike(col) for col in y.T]).T + return remove_na_arraylike(y) diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9212587e05e2e3689b680ff01ae7780230657e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py @@ -0,0 +1,481 @@ +from __future__ import annotations + +import random +from typing import TYPE_CHECKING + +from matplotlib import patches +import matplotlib.lines as mlines +import numpy as np + +from pandas.core.dtypes.missing import notna + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + do_adjust_figure, + maybe_adjust_figure, + set_ticks_props, +) + +if TYPE_CHECKING: + from collections.abc import Hashable + + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +def scatter_matrix( + frame: DataFrame, + alpha: float = 0.5, + figsize: tuple[float, float] | None = None, + ax=None, + grid: bool = False, + diagonal: str = "hist", + marker: str = ".", + density_kwds=None, + hist_kwds=None, + range_padding: float = 0.05, + **kwds, +): + df = frame._get_numeric_data() + n = df.columns.size + naxes = n * n + fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) + + # no gaps between subplots + maybe_adjust_figure(fig, wspace=0, hspace=0) + + mask = notna(df) + + marker = _get_marker_compat(marker) + + hist_kwds = hist_kwds or {} + density_kwds = density_kwds or {} + + # GH 14855 + kwds.setdefault("edgecolors", "none") + + boundaries_list = [] + for a in df.columns: + values = df[a].values[mask[a].values] + rmin_, rmax_ = 
np.min(values), np.max(values) + rdelta_ext = (rmax_ - rmin_) * range_padding / 2 + boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) + + for i, a in enumerate(df.columns): + for j, b in enumerate(df.columns): + ax = axes[i, j] + + if i == j: + values = df[a].values[mask[a].values] + + # Deal with the diagonal by drawing a histogram there. + if diagonal == "hist": + ax.hist(values, **hist_kwds) + + elif diagonal in ("kde", "density"): + from scipy.stats import gaussian_kde + + y = values + gkde = gaussian_kde(y) + ind = np.linspace(y.min(), y.max(), 1000) + ax.plot(ind, gkde.evaluate(ind), **density_kwds) + + ax.set_xlim(boundaries_list[i]) + + else: + common = (mask[a] & mask[b]).values + + ax.scatter( + df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds + ) + + ax.set_xlim(boundaries_list[j]) + ax.set_ylim(boundaries_list[i]) + + ax.set_xlabel(b) + ax.set_ylabel(a) + + if j != 0: + ax.yaxis.set_visible(False) + if i != n - 1: + ax.xaxis.set_visible(False) + + if len(df.columns) > 1: + lim1 = boundaries_list[0] + locs = axes[0][1].yaxis.get_majorticklocs() + locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] + adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) + + lim0 = axes[0][0].get_ylim() + adj = adj * (lim0[1] - lim0[0]) + lim0[0] + axes[0][0].yaxis.set_ticks(adj) + + if np.all(locs == locs.astype(int)): + # if all ticks are int + locs = locs.astype(int) + axes[0][0].yaxis.set_ticklabels(locs) + + set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + return axes + + +def _get_marker_compat(marker): + if marker not in mlines.lineMarkers: + return "o" + return marker + + +def radviz( + frame: DataFrame, + class_column, + ax: Axes | None = None, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def normalize(series): + a = min(series) + b = max(series) + return (series - a) / (b - a) + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = 
frame[class_column] + df = frame.drop(class_column, axis=1).apply(normalize) + + if ax is None: + ax = plt.gca() + ax.set_xlim(-1, 1) + ax.set_ylim(-1, 1) + + to_plot: dict[Hashable, list[list]] = {} + colors = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + for kls in classes: + to_plot[kls] = [[], []] + + m = len(frame.columns) - 1 + s = np.array( + [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]] + ) + + for i in range(n): + row = df.iloc[i].values + row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) + y = (s * row_).sum(axis=0) / row.sum() + kls = class_col.iat[i] + to_plot[kls][0].append(y[0]) + to_plot[kls][1].append(y[1]) + + for i, kls in enumerate(classes): + ax.scatter( + to_plot[kls][0], + to_plot[kls][1], + color=colors[i], + label=pprint_thing(kls), + **kwds, + ) + ax.legend() + + ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none")) + + for xy, name in zip(s, df.columns): + ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray")) + + if xy[0] < 0.0 and xy[1] < 0.0: + ax.text( + xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small" + ) + elif xy[0] < 0.0 <= xy[1]: + ax.text( + xy[0] - 0.025, + xy[1] + 0.025, + name, + ha="right", + va="bottom", + size="small", + ) + elif xy[1] < 0.0 <= xy[0]: + ax.text( + xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small" + ) + elif xy[0] >= 0.0 and xy[1] >= 0.0: + ax.text( + xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small" + ) + + ax.axis("equal") + return ax + + +def andrews_curves( + frame: DataFrame, + class_column, + ax: Axes | None = None, + samples: int = 200, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def function(amplitudes): + def f(t): + x1 = amplitudes[0] + result = x1 / np.sqrt(2.0) + + # Take the rest of the coefficients and resize them + # appropriately. 
Take a copy of amplitudes as otherwise numpy + # deletes the element from amplitudes itself. + coeffs = np.delete(np.copy(amplitudes), 0) + coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2)) + + # Generate the harmonics and arguments for the sin and cos + # functions. + harmonics = np.arange(0, coeffs.shape[0]) + 1 + trig_args = np.outer(harmonics, t) + + result += np.sum( + coeffs[:, 0, np.newaxis] * np.sin(trig_args) + + coeffs[:, 1, np.newaxis] * np.cos(trig_args), + axis=0, + ) + return result + + return f + + n = len(frame) + class_col = frame[class_column] + classes = frame[class_column].drop_duplicates() + df = frame.drop(class_column, axis=1) + t = np.linspace(-np.pi, np.pi, samples) + used_legends: set[str] = set() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + colors = dict(zip(classes, color_values)) + if ax is None: + ax = plt.gca() + ax.set_xlim(-np.pi, np.pi) + for i in range(n): + row = df.iloc[i].values + f = function(row) + y = f(t) + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(t, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(t, y, color=colors[kls], **kwds) + + ax.legend(loc="upper right") + ax.grid() + return ax + + +def bootstrap_plot( + series: Series, + fig: Figure | None = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> Figure: + import matplotlib.pyplot as plt + + # TODO: is the failure mentioned below still relevant? 
+ # random.sample(ndarray, int) fails on python 3.3, sigh + data = list(series.values) + samplings = [random.sample(data, size) for _ in range(samples)] + + means = np.array([np.mean(sampling) for sampling in samplings]) + medians = np.array([np.median(sampling) for sampling in samplings]) + midranges = np.array( + [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings] + ) + if fig is None: + fig = plt.figure() + x = list(range(samples)) + axes = [] + ax1 = fig.add_subplot(2, 3, 1) + ax1.set_xlabel("Sample") + axes.append(ax1) + ax1.plot(x, means, **kwds) + ax2 = fig.add_subplot(2, 3, 2) + ax2.set_xlabel("Sample") + axes.append(ax2) + ax2.plot(x, medians, **kwds) + ax3 = fig.add_subplot(2, 3, 3) + ax3.set_xlabel("Sample") + axes.append(ax3) + ax3.plot(x, midranges, **kwds) + ax4 = fig.add_subplot(2, 3, 4) + ax4.set_xlabel("Mean") + axes.append(ax4) + ax4.hist(means, **kwds) + ax5 = fig.add_subplot(2, 3, 5) + ax5.set_xlabel("Median") + axes.append(ax5) + ax5.hist(medians, **kwds) + ax6 = fig.add_subplot(2, 3, 6) + ax6.set_xlabel("Midrange") + axes.append(ax6) + ax6.hist(midranges, **kwds) + for axis in axes: + plt.setp(axis.get_xticklabels(), fontsize=8) + plt.setp(axis.get_yticklabels(), fontsize=8) + if do_adjust_figure(fig): + plt.tight_layout() + return fig + + +def parallel_coordinates( + frame: DataFrame, + class_column, + cols=None, + ax: Axes | None = None, + color=None, + use_columns: bool = False, + xticks=None, + colormap=None, + axvlines: bool = True, + axvlines_kwds=None, + sort_labels: bool = False, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + if axvlines_kwds is None: + axvlines_kwds = {"linewidth": 1, "color": "black"} + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + + if cols is None: + df = frame.drop(class_column, axis=1) + else: + df = frame[cols] + + used_legends: set[str] = set() + + ncols = len(df.columns) + + # determine values to use for xticks + x: list[int] | 
Index + if use_columns is True: + if not np.all(np.isreal(list(df.columns))): + raise ValueError("Columns must be numeric to be used as xticks") + x = df.columns + elif xticks is not None: + if not np.all(np.isreal(xticks)): + raise ValueError("xticks specified must be numeric") + if len(xticks) != ncols: + raise ValueError("Length of xticks must match number of columns") + x = xticks + else: + x = list(range(ncols)) + + if ax is None: + ax = plt.gca() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + if sort_labels: + classes = sorted(classes) + color_values = sorted(color_values) + colors = dict(zip(classes, color_values)) + + for i in range(n): + y = df.iloc[i].values + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(x, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(x, y, color=colors[kls], **kwds) + + if axvlines: + for i in x: + ax.axvline(i, **axvlines_kwds) + + ax.set_xticks(x) + ax.set_xticklabels(df.columns) + ax.set_xlim(x[0], x[-1]) + ax.legend(loc="upper right") + ax.grid() + return ax + + +def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: + # workaround because `c='b'` is hardcoded in matplotlib's scatter method + import matplotlib.pyplot as plt + + kwds.setdefault("c", plt.rcParams["patch.facecolor"]) + + data = series.values + y1 = data[:-lag] + y2 = data[lag:] + if ax is None: + ax = plt.gca() + ax.set_xlabel("y(t)") + ax.set_ylabel(f"y(t + {lag})") + ax.scatter(y1, y2, **kwds) + return ax + + +def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes: + import matplotlib.pyplot as plt + + n = len(series) + data = np.asarray(series) + if ax is None: + ax = plt.gca() + ax.set_xlim(1, n) + ax.set_ylim(-1.0, 1.0) + mean = np.mean(data) + c0 = np.sum((data - mean) ** 2) / n + + def r(h): + return ((data[: n - h] - mean) * (data[h:] - 
mean)).sum() / n / c0 + + x = np.arange(n) + 1 + y = [r(loc) for loc in x] + z95 = 1.959963984540054 + z99 = 2.5758293035489004 + ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey") + ax.axhline(y=z95 / np.sqrt(n), color="grey") + ax.axhline(y=0.0, color="black") + ax.axhline(y=-z95 / np.sqrt(n), color="grey") + ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey") + ax.set_xlabel("Lag") + ax.set_ylabel("Autocorrelation") + ax.plot(x, y, **kwds) + if "label" in kwds: + ax.legend() + ax.grid() + return ax + + +def unpack_single_str_list(keys): + # GH 42795 + if isinstance(keys, list) and len(keys) == 1: + keys = keys[0] + return keys diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py new file mode 100644 index 0000000000000000000000000000000000000000..bf4e4be3bfd82e6ce89d526aa0da555f67b9f565 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py @@ -0,0 +1,278 @@ +from __future__ import annotations + +from collections.abc import ( + Collection, + Iterator, +) +import itertools +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import matplotlib as mpl +import matplotlib.colors +import numpy as np + +from pandas._typing import MatplotlibColor as Color +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_list_like + +import pandas.core.common as com + +if TYPE_CHECKING: + from matplotlib.colors import Colormap + + +def get_standard_colors( + num_colors: int, + colormap: Colormap | None = None, + color_type: str = "default", + color: dict[str, Color] | Color | Collection[Color] | None = None, +): + """ + Get standard colors based on `colormap`, `color_type` or `color` inputs. + + Parameters + ---------- + num_colors : int + Minimum number of colors to be returned. + Ignored if `color` is a dictionary. 
+ colormap : :py:class:`matplotlib.colors.Colormap`, optional + Matplotlib colormap. + When provided, the resulting colors will be derived from the colormap. + color_type : {"default", "random"}, optional + Type of colors to derive. Used if provided `color` and `colormap` are None. + Ignored if either `color` or `colormap` are not None. + color : dict or str or sequence, optional + Color(s) to be used for deriving sequence of colors. + Can be either be a dictionary, or a single color (single color string, + or sequence of floats representing a single color), + or a sequence of colors. + + Returns + ------- + dict or list + Standard colors. Can either be a mapping if `color` was a dictionary, + or a list of colors with a length of `num_colors` or more. + + Warns + ----- + UserWarning + If both `colormap` and `color` are provided. + Parameter `color` will override. + """ + if isinstance(color, dict): + return color + + colors = _derive_colors( + color=color, + colormap=colormap, + color_type=color_type, + num_colors=num_colors, + ) + + return list(_cycle_colors(colors, num_colors=num_colors)) + + +def _derive_colors( + *, + color: Color | Collection[Color] | None, + colormap: str | Colormap | None, + color_type: str, + num_colors: int, +) -> list[Color]: + """ + Derive colors from either `colormap`, `color_type` or `color` inputs. + + Get a list of colors either from `colormap`, or from `color`, + or from `color_type` (if both `colormap` and `color` are None). + + Parameters + ---------- + color : str or sequence, optional + Color(s) to be used for deriving sequence of colors. + Can be either be a single color (single color string, or sequence of floats + representing a single color), or a sequence of colors. + colormap : :py:class:`matplotlib.colors.Colormap`, optional + Matplotlib colormap. + When provided, the resulting colors will be derived from the colormap. + color_type : {"default", "random"}, optional + Type of colors to derive. 
Used if provided `color` and `colormap` are None. + Ignored if either `color` or `colormap`` are not None. + num_colors : int + Number of colors to be extracted. + + Returns + ------- + list + List of colors extracted. + + Warns + ----- + UserWarning + If both `colormap` and `color` are provided. + Parameter `color` will override. + """ + if color is None and colormap is not None: + return _get_colors_from_colormap(colormap, num_colors=num_colors) + elif color is not None: + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + return _get_colors_from_color(color) + else: + return _get_colors_from_color_type(color_type, num_colors=num_colors) + + +def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]: + """Cycle colors until achieving max of `num_colors` or length of `colors`. + + Extra colors will be ignored by matplotlib if there are more colors + than needed and nothing needs to be done here. 
+ """ + max_colors = max(num_colors, len(colors)) + yield from itertools.islice(itertools.cycle(colors), max_colors) + + +def _get_colors_from_colormap( + colormap: str | Colormap, + num_colors: int, +) -> list[Color]: + """Get colors from colormap.""" + cmap = _get_cmap_instance(colormap) + return [cmap(num) for num in np.linspace(0, 1, num=num_colors)] + + +def _get_cmap_instance(colormap: str | Colormap) -> Colormap: + """Get instance of matplotlib colormap.""" + if isinstance(colormap, str): + cmap = colormap + colormap = mpl.colormaps[colormap] + if colormap is None: + raise ValueError(f"Colormap {cmap} is not recognized") + return colormap + + +def _get_colors_from_color( + color: Color | Collection[Color], +) -> list[Color]: + """Get colors from user input color.""" + if len(color) == 0: + raise ValueError(f"Invalid color argument: {color}") + + if _is_single_color(color): + color = cast(Color, color) + return [color] + + color = cast(Collection[Color], color) + return list(_gen_list_of_colors_from_iterable(color)) + + +def _is_single_color(color: Color | Collection[Color]) -> bool: + """Check if `color` is a single color, not a sequence of colors. + + Single color is of these kinds: + - Named color "red", "C0", "firebrick" + - Alias "g" + - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4). + + See Also + -------- + _is_single_string_color + """ + if isinstance(color, str) and _is_single_string_color(color): + # GH #36972 + return True + + if _is_floats_color(color): + return True + + return False + + +def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]: + """ + Yield colors from string of several letters or from collection of colors. 
+ """ + for x in color: + if _is_single_color(x): + yield x + else: + raise ValueError(f"Invalid color {x}") + + +def _is_floats_color(color: Color | Collection[Color]) -> bool: + """Check if color comprises a sequence of floats representing color.""" + return bool( + is_list_like(color) + and (len(color) == 3 or len(color) == 4) + and all(isinstance(x, (int, float)) for x in color) + ) + + +def _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]: + """Get colors from user input color type.""" + if color_type == "default": + return _get_default_colors(num_colors) + elif color_type == "random": + return _get_random_colors(num_colors) + else: + raise ValueError("color_type must be either 'default' or 'random'") + + +def _get_default_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of default colors from matplotlib rc params.""" + import matplotlib.pyplot as plt + + colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]] + return colors[0:num_colors] + + +def _get_random_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of random colors.""" + return [_random_color(num) for num in range(num_colors)] + + +def _random_color(column: int) -> list[float]: + """Get a random color represented as a list of length 3""" + # GH17525 use common._random_state to avoid resetting the seed + rs = com.random_state(column) + return rs.rand(3).tolist() + + +def _is_single_string_color(color: Color) -> bool: + """Check if `color` is a single string color. + + Examples of single string colors: + - 'r' + - 'g' + - 'red' + - 'green' + - 'C3' + - 'firebrick' + + Parameters + ---------- + color : Color + Color string or sequence of floats. + + Returns + ------- + bool + True if `color` looks like a valid color. + False otherwise. + """ + conv = matplotlib.colors.ColorConverter() + try: + # error: Argument 1 to "to_rgba" of "ColorConverter" has incompatible type + # "str | Sequence[float]"; expected "tuple[float, float, float] | ..." 
+ conv.to_rgba(color) # type: ignore[arg-type] + except ValueError: + return False + else: + return True diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py new file mode 100644 index 0000000000000000000000000000000000000000..c7ddfa55d0417f8c8fa4addd82faacedf90394d3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py @@ -0,0 +1,370 @@ +# TODO: Use the fact that axis can have units to simplify the process + +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + Period, + to_offset, +) +from pandas._libs.tslibs.dtypes import ( + OFFSET_TO_PERIOD_FREQSTR, + FreqGroup, +) + +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, + ABCPeriodIndex, + ABCTimedeltaIndex, +) + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.converter import ( + TimeSeries_DateFormatter, + TimeSeries_DateLocator, + TimeSeries_TimedeltaFormatter, +) +from pandas.tseries.frequencies import ( + get_period_alias, + is_subperiod, + is_superperiod, +) + +if TYPE_CHECKING: + from datetime import timedelta + + from matplotlib.axes import Axes + + from pandas._typing import NDFrameT + + from pandas import ( + DataFrame, + DatetimeIndex, + Index, + PeriodIndex, + Series, + ) + +# --------------------------------------------------------------------- +# Plotting functions and monkey patches + + +def maybe_resample(series: Series, ax: Axes, kwargs: dict[str, Any]): + # resample against axes freq if necessary + + if "how" in kwargs: + raise ValueError( + "'how' is not a valid keyword for plotting functions. If plotting " + "multiple objects on shared axes, resample manually first." 
+ ) + + freq, ax_freq = _get_freq(ax, series) + + if freq is None: # pragma: no cover + raise ValueError("Cannot use dynamic axis without frequency info") + + # Convert DatetimeIndex to PeriodIndex + if isinstance(series.index, ABCDatetimeIndex): + series = series.to_period(freq=freq) + + if ax_freq is not None and freq != ax_freq: + if is_superperiod(freq, ax_freq): # upsample input + series = series.copy() + # error: "Index" has no attribute "asfreq" + series.index = series.index.asfreq( # type: ignore[attr-defined] + ax_freq, how="s" + ) + freq = ax_freq + elif _is_sup(freq, ax_freq): # one is weekly + # Resampling with PeriodDtype is deprecated, so we convert to + # DatetimeIndex, resample, then convert back. + ser_ts = series.to_timestamp() + ser_d = ser_ts.resample("D").last().dropna() + ser_freq = ser_d.resample(ax_freq).last().dropna() + series = ser_freq.to_period(ax_freq) + freq = ax_freq + elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): + _upsample_others(ax, freq, kwargs) + else: # pragma: no cover + raise ValueError("Incompatible frequency conversion") + return freq, series + + +def _is_sub(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_subperiod("D", f2)) or ( + f2.startswith("W") and is_subperiod(f1, "D") + ) + + +def _is_sup(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_superperiod("D", f2)) or ( + f2.startswith("W") and is_superperiod(f1, "D") + ) + + +def _upsample_others(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None: + legend = ax.get_legend() + lines, labels = _replot_ax(ax, freq) + _replot_ax(ax, freq) + + other_ax = None + if hasattr(ax, "left_ax"): + other_ax = ax.left_ax + if hasattr(ax, "right_ax"): + other_ax = ax.right_ax + + if other_ax is not None: + rlines, rlabels = _replot_ax(other_ax, freq) + lines.extend(rlines) + labels.extend(rlabels) + + if legend is not None and kwargs.get("legend", True) and len(lines) > 0: + title: str | None = legend.get_title().get_text() + if title 
== "None": + title = None + ax.legend(lines, labels, loc="best", title=title) + + +def _replot_ax(ax: Axes, freq: BaseOffset): + data = getattr(ax, "_plot_data", None) + + # clear current axes and data + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] + ax.clear() + + decorate_axes(ax, freq) + + lines = [] + labels = [] + if data is not None: + for series, plotf, kwds in data: + series = series.copy() + idx = series.index.asfreq(freq, how="S") + series.index = idx + # TODO #54485 + ax._plot_data.append((series, plotf, kwds)) # type: ignore[attr-defined] + + # for tsplot + if isinstance(plotf, str): + from pandas.plotting._matplotlib import PLOT_CLASSES + + plotf = PLOT_CLASSES[plotf]._plot + + lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) + labels.append(pprint_thing(series.name)) + + return lines, labels + + +def decorate_axes(ax: Axes, freq: BaseOffset) -> None: + """Initialize axes for time-series plotting""" + if not hasattr(ax, "_plot_data"): + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] + + # TODO #54485 + ax.freq = freq # type: ignore[attr-defined] + xaxis = ax.get_xaxis() + # TODO #54485 + xaxis.freq = freq # type: ignore[attr-defined] + + +def _get_ax_freq(ax: Axes): + """ + Get the freq attribute of the ax object if set. 
+ Also checks shared axes (eg when using secondary yaxis, sharex=True + or twinx) + """ + ax_freq = getattr(ax, "freq", None) + if ax_freq is None: + # check for left/right ax in case of secondary yaxis + if hasattr(ax, "left_ax"): + ax_freq = getattr(ax.left_ax, "freq", None) + elif hasattr(ax, "right_ax"): + ax_freq = getattr(ax.right_ax, "freq", None) + if ax_freq is None: + # check if a shared ax (sharex/twinx) has already freq set + shared_axes = ax.get_shared_x_axes().get_siblings(ax) + if len(shared_axes) > 1: + for shared_ax in shared_axes: + ax_freq = getattr(shared_ax, "freq", None) + if ax_freq is not None: + break + return ax_freq + + +def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: + if isinstance(freq, BaseOffset): + freqstr = freq.name + else: + freqstr = to_offset(freq, is_period=True).rule_code + + return get_period_alias(freqstr) + + +def _get_freq(ax: Axes, series: Series): + # get frequency from data + freq = getattr(series.index, "freq", None) + if freq is None: + freq = getattr(series.index, "inferred_freq", None) + freq = to_offset(freq, is_period=True) + + ax_freq = _get_ax_freq(ax) + + # use axes freq if no data freq + if freq is None: + freq = ax_freq + + # get the period frequency + freq = _get_period_alias(freq) + return freq, ax_freq + + +def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool: + freq = _get_index_freq(data.index) + ax_freq = _get_ax_freq(ax) + + if freq is None: # convert irregular if axes has freq info + freq = ax_freq + # do not use tsplot if irregular was plotted first + elif (ax_freq is None) and (len(ax.get_lines()) > 0): + return False + + if freq is None: + return False + + freq_str = _get_period_alias(freq) + + if freq_str is None: + return False + + # FIXME: hack this for 0.10.1, creating more technical debt...sigh + if isinstance(data.index, ABCDatetimeIndex): + # error: "BaseOffset" has no attribute "_period_dtype_code" + freq_str = OFFSET_TO_PERIOD_FREQSTR.get(freq_str, 
freq_str) + base = to_offset( + freq_str, is_period=True + )._period_dtype_code # type: ignore[attr-defined] + x = data.index + if base <= FreqGroup.FR_DAY.value: + return x[:1].is_normalized + period = Period(x[0], freq_str) + assert isinstance(period, Period) + return period.to_timestamp().tz_localize(x.tz) == x[0] + return True + + +def _get_index_freq(index: Index) -> BaseOffset | None: + freq = getattr(index, "freq", None) + if freq is None: + freq = getattr(index, "inferred_freq", None) + if freq == "B": + # error: "Index" has no attribute "dayofweek" + weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] + if (5 in weekdays) or (6 in weekdays): + freq = None + + freq = to_offset(freq) + return freq + + +def maybe_convert_index(ax: Axes, data: NDFrameT) -> NDFrameT: + # tsplot converts automatically, but don't want to convert index + # over and over for DataFrames + if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): + freq: str | BaseOffset | None = data.index.freq + + if freq is None: + # We only get here for DatetimeIndex + data.index = cast("DatetimeIndex", data.index) + freq = data.index.inferred_freq + freq = to_offset(freq) + + if freq is None: + freq = _get_ax_freq(ax) + + if freq is None: + raise ValueError("Could not get frequency alias for plotting") + + freq_str = _get_period_alias(freq) + + with warnings.catch_warnings(): + # suppress Period[B] deprecation warning + # TODO: need to find an alternative to this before the deprecation + # is enforced! + warnings.filterwarnings( + "ignore", + r"PeriodDtype\[B\] is deprecated", + category=FutureWarning, + ) + + if isinstance(data.index, ABCDatetimeIndex): + data = data.tz_localize(None).to_period(freq=freq_str) + elif isinstance(data.index, ABCPeriodIndex): + data.index = data.index.asfreq(freq=freq_str) + return data + + +# Patch methods for subplot. 
+ + +def _format_coord(freq, t, y) -> str: + time_period = Period(ordinal=int(t), freq=freq) + return f"t = {time_period} y = {y:8f}" + + +def format_dateaxis( + subplot, freq: BaseOffset, index: DatetimeIndex | PeriodIndex +) -> None: + """ + Pretty-formats the date axis (x-axis). + + Major and minor ticks are automatically set for the frequency of the + current underlying series. As the dynamic mode is activated by + default, changing the limits of the x axis will intelligently change + the positions of the ticks. + """ + from matplotlib import pylab + + # handle index specific formatting + # Note: DatetimeIndex does not use this + # interface. DatetimeIndex uses matplotlib.date directly + if isinstance(index, ABCPeriodIndex): + majlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_locator(majlocator) + subplot.xaxis.set_minor_locator(minlocator) + + majformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_formatter(majformatter) + subplot.xaxis.set_minor_formatter(minformatter) + + # x and y coord info + subplot.format_coord = functools.partial(_format_coord, freq) + + elif isinstance(index, ABCTimedeltaIndex): + subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter()) + else: + raise TypeError("index type not supported") + + pylab.draw_if_interactive() diff --git a/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..898b5b25e7b0171e4b41106bf340e7fbc3bbb735 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py @@ -0,0 +1,492 @@ +# being a bit too dynamic +from __future__ import annotations + +from math import ceil +from typing import TYPE_CHECKING +import warnings + +from matplotlib import ticker +import matplotlib.table +import numpy as np + +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from matplotlib.axes import Axes + from matplotlib.axis import Axis + from matplotlib.figure import Figure + from matplotlib.lines import Line2D + from matplotlib.table import Table + + from pandas import ( + DataFrame, + Series, + ) + + +def do_adjust_figure(fig: Figure) -> bool: + """Whether fig has constrained_layout enabled.""" + if not hasattr(fig, "get_constrained_layout"): + return False + return not fig.get_constrained_layout() + + +def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: + """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" + if do_adjust_figure(fig): + fig.subplots_adjust(*args, **kwargs) + + +def format_date_labels(ax: Axes, rot) -> None: + # mini version of autofmt_xdate + for label in ax.get_xticklabels(): + label.set_horizontalalignment("right") + label.set_rotation(rot) + fig = ax.get_figure() + if fig is not None: + # should always be a Figure but can technically be None + maybe_adjust_figure(fig, bottom=0.2) + + +def table( + ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs +) -> Table: + if isinstance(data, ABCSeries): + data = data.to_frame() + elif isinstance(data, ABCDataFrame): + pass + else: + raise ValueError("Input data must be DataFrame or Series") + + if rowLabels is None: + rowLabels = data.index + + if colLabels is None: + colLabels = data.columns + + cellText = data.values + + # error: 
Argument "cellText" to "table" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[Sequence[str]] | None" + return matplotlib.table.table( + ax, + cellText=cellText, # type: ignore[arg-type] + rowLabels=rowLabels, + colLabels=colLabels, + **kwargs, + ) + + +def _get_layout( + nplots: int, + layout: tuple[int, int] | None = None, + layout_type: str = "box", +) -> tuple[int, int]: + if layout is not None: + if not isinstance(layout, (tuple, list)) or len(layout) != 2: + raise ValueError("Layout must be a tuple of (rows, columns)") + + nrows, ncols = layout + + if nrows == -1 and ncols > 0: + layout = nrows, ncols = (ceil(nplots / ncols), ncols) + elif ncols == -1 and nrows > 0: + layout = nrows, ncols = (nrows, ceil(nplots / nrows)) + elif ncols <= 0 and nrows <= 0: + msg = "At least one dimension of layout must be positive" + raise ValueError(msg) + + if nrows * ncols < nplots: + raise ValueError( + f"Layout of {nrows}x{ncols} must be larger than required size {nplots}" + ) + + return layout + + if layout_type == "single": + return (1, 1) + elif layout_type == "horizontal": + return (1, nplots) + elif layout_type == "vertical": + return (nplots, 1) + + layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} + try: + return layouts[nplots] + except KeyError: + k = 1 + while k**2 < nplots: + k += 1 + + if (k - 1) * k >= nplots: + return k, (k - 1) + else: + return k, k + + +# copied from matplotlib/pyplot.py and modified for pandas.plotting + + +def create_subplots( + naxes: int, + sharex: bool = False, + sharey: bool = False, + squeeze: bool = True, + subplot_kw=None, + ax=None, + layout=None, + layout_type: str = "box", + **fig_kw, +): + """ + Create a figure with a set of subplots already made. + + This utility wrapper makes it convenient to create common layouts of + subplots, including the enclosing figure object, in a single call. + + Parameters + ---------- + naxes : int + Number of required axes. Exceeded axes are set invisible. 
Default is + nrows * ncols. + + sharex : bool + If True, the X axis will be shared amongst all subplots. + + sharey : bool + If True, the Y axis will be shared amongst all subplots. + + squeeze : bool + + If True, extra dimensions are squeezed out from the returned axis object: + - if only one subplot is constructed (nrows=ncols=1), the resulting + single Axis object is returned as a scalar. + - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object + array of Axis objects are returned as numpy 1-d arrays. + - for NxM subplots with N>1 and M>1 are returned as a 2d array. + + If False, no squeezing is done: the returned axis object is always + a 2-d array containing Axis instances, even if it ends up being 1x1. + + subplot_kw : dict + Dict with keywords passed to the add_subplot() call used to create each + subplots. + + ax : Matplotlib axis object, optional + + layout : tuple + Number of rows and columns of the subplot grid. + If not specified, calculated from naxes and layout_type + + layout_type : {'box', 'horizontal', 'vertical'}, default 'box' + Specify how to layout the subplot grid. + + fig_kw : Other keyword arguments to be passed to the figure() call. + Note that all keywords not recognized above will be + automatically included here. + + Returns + ------- + fig, ax : tuple + - fig is the Matplotlib Figure object + - ax can be either a single axis object or an array of axis objects if + more than one subplot was created. The dimensions of the resulting array + can be controlled with the squeeze keyword, see above. 
+ + Examples + -------- + x = np.linspace(0, 2*np.pi, 400) + y = np.sin(x**2) + + # Just a figure and one subplot + f, ax = plt.subplots() + ax.plot(x, y) + ax.set_title('Simple plot') + + # Two subplots, unpack the output array immediately + f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) + ax1.plot(x, y) + ax1.set_title('Sharing Y axis') + ax2.scatter(x, y) + + # Four polar axes + plt.subplots(2, 2, subplot_kw=dict(polar=True)) + """ + import matplotlib.pyplot as plt + + if subplot_kw is None: + subplot_kw = {} + + if ax is None: + fig = plt.figure(**fig_kw) + else: + if is_list_like(ax): + if squeeze: + ax = flatten_axes(ax) + if layout is not None: + warnings.warn( + "When passing multiple axes, layout keyword is ignored.", + UserWarning, + stacklevel=find_stack_level(), + ) + if sharex or sharey: + warnings.warn( + "When passing multiple axes, sharex and sharey " + "are ignored. These settings must be specified when creating axes.", + UserWarning, + stacklevel=find_stack_level(), + ) + if ax.size == naxes: + fig = ax.flat[0].get_figure() + return fig, ax + else: + raise ValueError( + f"The number of passed axes must be {naxes}, the " + "same as the output plot" + ) + + fig = ax.get_figure() + # if ax is passed and a number of subplots is 1, return ax as it is + if naxes == 1: + if squeeze: + return fig, ax + else: + return fig, flatten_axes(ax) + else: + warnings.warn( + "To output multiple subplots, the figure containing " + "the passed axes is being cleared.", + UserWarning, + stacklevel=find_stack_level(), + ) + fig.clear() + + nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) + nplots = nrows * ncols + + # Create empty object array to hold all axes. 
It's easiest to make it 1-d + # so we can just append subplots upon creation, and then + axarr = np.empty(nplots, dtype=object) + + # Create first subplot separately, so we can share it if requested + ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) + + if sharex: + subplot_kw["sharex"] = ax0 + if sharey: + subplot_kw["sharey"] = ax0 + axarr[0] = ax0 + + # Note off-by-one counting because add_subplot uses the MATLAB 1-based + # convention. + for i in range(1, nplots): + kwds = subplot_kw.copy() + # Set sharex and sharey to None for blank/dummy axes, these can + # interfere with proper axis limits on the visible axes if + # they share axes e.g. issue #7528 + if i >= naxes: + kwds["sharex"] = None + kwds["sharey"] = None + ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) + axarr[i] = ax + + if naxes != nplots: + for ax in axarr[naxes:]: + ax.set_visible(False) + + handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + + if squeeze: + # Reshape the array to have the final desired dimension (nrow,ncol), + # though discarding unneeded dimensions that equal 1. If we only have + # one subplot, just return it instead of a 1-element array. 
        if nplots == 1:
            # Single subplot: hand back the Axes object itself, not an array.
            axes = axarr[0]
        else:
            axes = axarr.reshape(nrows, ncols).squeeze()
    else:
        # returned axis array will be always 2-d, even if nrows=ncols=1
        axes = axarr.reshape(nrows, ncols)

    return fig, axes


def _remove_labels_from_axis(axis: Axis) -> None:
    """
    Hide all tick labels and the axis label on ``axis``.

    Used when axes are shared, so only the outermost subplots keep their
    labels.
    """
    for t in axis.get_majorticklabels():
        t.set_visible(False)

    # set_visible will not be effective if
    # minor axis has NullLocator and NullFormatter (default),
    # so install real locator/formatter objects first
    if isinstance(axis.get_minor_locator(), ticker.NullLocator):
        axis.set_minor_locator(ticker.AutoLocator())
    if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
        axis.set_minor_formatter(ticker.FormatStrFormatter(""))
    for t in axis.get_minorticklabels():
        t.set_visible(False)

    axis.get_label().set_visible(False)


def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool:
    """
    Return whether an axis is externally shared.

    Parameters
    ----------
    ax1 : matplotlib.axes.Axes
        Axis to query.
    compare_axis : str
        `"x"` or `"y"` according to whether the X-axis or Y-axis is being
        compared.

    Returns
    -------
    bool
        `True` if the axis is externally shared. Otherwise `False`.

    Raises
    ------
    ValueError
        If ``compare_axis`` is neither ``"x"`` nor ``"y"``.

    Notes
    -----
    If two axes with different positions are sharing an axis, they can be
    referred to as *externally* sharing the common axis.

    If two axes sharing an axis also have the same position, they can be
    referred to as *internally* sharing the common axis (a.k.a. twinning).

    handle_shared_axes() is only interested in axes externally sharing an
    axis, regardless of whether either of the axes is also internally sharing
    with a third axis.
    """
    if compare_axis == "x":
        axes = ax1.get_shared_x_axes()
    elif compare_axis == "y":
        axes = ax1.get_shared_y_axes()
    else:
        raise ValueError(
            "_has_externally_shared_axis() needs 'x' or 'y' as a second parameter"
        )

    axes_siblings = axes.get_siblings(ax1)

    # A sibling whose position differs from ax1's means the axis is shared
    # with an axes elsewhere in the figure, i.e. externally shared.
    # Siblings at the same position are twins (internal sharing) and ignored.
    ax1_points = ax1.get_position().get_points()

    for ax2 in axes_siblings:
        if not np.array_equal(ax1_points, ax2.get_position().get_points()):
            return True

    return False


def handle_shared_axes(
    axarr: Iterable[Axes],
    nplots: int,
    naxes: int,
    nrows: int,
    ncols: int,
    sharex: bool,
    sharey: bool,
) -> None:
    """
    Hide redundant tick labels on a grid of shared subplots.

    X tick labels are removed from every axes that has a visible subplot
    directly below it (so only the bottom-most subplots of each column keep
    them); Y tick labels are removed from every column but the first.
    Labels are only removed when the axes are shared via ``sharex`` /
    ``sharey`` or are externally shared (see
    ``_has_externally_shared_axis``).
    """
    if nplots > 1:
        row_num = lambda x: x.get_subplotspec().rowspan.start
        col_num = lambda x: x.get_subplotspec().colspan.start

        is_first_col = lambda x: x.get_subplotspec().is_first_col()

        if nrows > 1:
            try:
                # first find out the ax layout,
                # so that we can correctly handle 'gaps'
                layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_)
                for ax in axarr:
                    layout[row_num(ax), col_num(ax)] = ax.get_visible()

                for ax in axarr:
                    # only the last row of subplots should get x labels -> all
                    # other off; layout handles the case that the subplot is
                    # the last in the column, because below is no subplot/gap.
                    if not layout[row_num(ax) + 1, col_num(ax)]:
                        continue
                    if sharex or _has_externally_shared_axis(ax, "x"):
                        _remove_labels_from_axis(ax.xaxis)

            except IndexError:
                # if gridspec is used, ax.rowNum and ax.colNum may differ
                # from layout shape. in this case, use last_row logic
                is_last_row = lambda x: x.get_subplotspec().is_last_row()
                for ax in axarr:
                    if is_last_row(ax):
                        continue
                    if sharex or _has_externally_shared_axis(ax, "x"):
                        _remove_labels_from_axis(ax.xaxis)

        if ncols > 1:
            for ax in axarr:
                # only the first column should get y labels -> set all other to
                # off; as we only have labels in the first column and we always
                # have a subplot there, we can skip the layout test
                if is_first_col(ax):
                    continue
                if sharey or _has_externally_shared_axis(ax, "y"):
                    _remove_labels_from_axis(ax.yaxis)


def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray:
    """
    Return ``axes`` as a flat 1-d numpy object array of Axes.

    Accepts a single Axes, an ndarray/Index of Axes (ravelled), or any
    other list-like of Axes.
    """
    if not is_list_like(axes):
        return np.array([axes])
    elif isinstance(axes, (np.ndarray, ABCIndex)):
        return np.asarray(axes).ravel()
    return np.array(axes)


def set_ticks_props(
    axes: Axes | Sequence[Axes],
    xlabelsize: int | None = None,
    xrot=None,
    ylabelsize: int | None = None,
    yrot=None,
):
    """
    Apply font size and rotation to the tick labels of every axes in ``axes``.

    Parameters left as ``None`` are not touched.  Returns ``axes`` unchanged
    so calls can be chained.
    """
    import matplotlib.pyplot as plt

    for ax in flatten_axes(axes):
        if xlabelsize is not None:
            plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
        if xrot is not None:
            plt.setp(ax.get_xticklabels(), rotation=xrot)
        if ylabelsize is not None:
            plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
        if yrot is not None:
            plt.setp(ax.get_yticklabels(), rotation=yrot)
    return axes


def get_all_lines(ax: Axes) -> list[Line2D]:
    """
    Collect the Line2D artists of ``ax``, including those on its secondary
    ``right_ax`` / ``left_ax`` companion axes when present.
    """
    lines = ax.get_lines()

    if hasattr(ax, "right_ax"):
        lines += ax.right_ax.get_lines()

    if hasattr(ax, "left_ax"):
        lines += ax.left_ax.get_lines()

    return lines


def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]:
    """
    Return the overall ``(left, right)`` x-data range across ``lines``,
    ignoring NaN values.

    Starts from ``(inf, -inf)``, so an empty iterable yields that sentinel
    pair unchanged.
    """
    left, right = np.inf, -np.inf
    for line in lines:
        x = line.get_xdata(orig=False)
        left = min(np.nanmin(x), left)
        right = max(np.nanmax(x), right)
    return left, right