Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/LICENSE +73 -0
- evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/METADATA +157 -0
- evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/REQUESTED +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/WHEEL +4 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/AUTHORS +1 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/INSTALLER +1 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/LICENSE +29 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/METADATA +430 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/RECORD +103 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/REQUESTED +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/WHEEL +5 -0
- evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/top_level.txt +2 -0
- evalkit_cambrian/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/RECORD +58 -0
- evalkit_cambrian/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/entry_points.txt +3 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json +42 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data +37 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator +48 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content +17 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core +51 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format +14 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion +14 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data +37 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation +98 -0
- evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json +172 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/INSTALLER +1 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/METADATA +168 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/RECORD +384 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/REQUESTED +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/WHEEL +5 -0
- evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/top_level.txt +1 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__init__.py +3 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/cuda.py +18 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/semantic.py +1565 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/language/standard.py +404 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__init__.py +18 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/cache.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/driver.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/errors.cpython-310.pyc +0 -0
- evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/interpreter.cpython-310.pyc +0 -0
evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 Alex Grönholm
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
| 6 |
+
this software and associated documentation files (the "Software"), to deal in
|
| 7 |
+
the Software without restriction, including without limitation the rights to
|
| 8 |
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
| 9 |
+
the Software, and to permit persons to whom the Software is furnished to do so,
|
| 10 |
+
subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 17 |
+
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
| 18 |
+
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
| 19 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
| 20 |
+
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
This project contains code copied from the Python standard library.
|
| 24 |
+
The following is the required license notice for those parts.
|
| 25 |
+
|
| 26 |
+
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
| 27 |
+
--------------------------------------------
|
| 28 |
+
|
| 29 |
+
1. This LICENSE AGREEMENT is between the Python Software Foundation
|
| 30 |
+
("PSF"), and the Individual or Organization ("Licensee") accessing and
|
| 31 |
+
otherwise using this software ("Python") in source or binary form and
|
| 32 |
+
its associated documentation.
|
| 33 |
+
|
| 34 |
+
2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
| 35 |
+
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
| 36 |
+
analyze, test, perform and/or display publicly, prepare derivative works,
|
| 37 |
+
distribute, and otherwise use Python alone or in any derivative version,
|
| 38 |
+
provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
| 39 |
+
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
| 40 |
+
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
|
| 41 |
+
All Rights Reserved" are retained in Python alone or in any derivative version
|
| 42 |
+
prepared by Licensee.
|
| 43 |
+
|
| 44 |
+
3. In the event Licensee prepares a derivative work that is based on
|
| 45 |
+
or incorporates Python or any part thereof, and wants to make
|
| 46 |
+
the derivative work available to others as provided herein, then
|
| 47 |
+
Licensee hereby agrees to include in any such work a brief summary of
|
| 48 |
+
the changes made to Python.
|
| 49 |
+
|
| 50 |
+
4. PSF is making Python available to Licensee on an "AS IS"
|
| 51 |
+
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
| 52 |
+
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
| 53 |
+
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
| 54 |
+
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
| 55 |
+
INFRINGE ANY THIRD PARTY RIGHTS.
|
| 56 |
+
|
| 57 |
+
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
| 58 |
+
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
| 59 |
+
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
| 60 |
+
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
| 61 |
+
|
| 62 |
+
6. This License Agreement will automatically terminate upon a material
|
| 63 |
+
breach of its terms and conditions.
|
| 64 |
+
|
| 65 |
+
7. Nothing in this License Agreement shall be deemed to create any
|
| 66 |
+
relationship of agency, partnership, or joint venture between PSF and
|
| 67 |
+
Licensee. This License Agreement does not grant permission to use PSF
|
| 68 |
+
trademarks or trade name in a trademark sense to endorse or promote
|
| 69 |
+
products or services of Licensee, or any third party.
|
| 70 |
+
|
| 71 |
+
8. By copying, installing or otherwise using Python, Licensee
|
| 72 |
+
agrees to be bound by the terms and conditions of this License
|
| 73 |
+
Agreement.
|
evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/METADATA
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: exceptiongroup
|
| 3 |
+
Version: 1.2.2
|
| 4 |
+
Summary: Backport of PEP 654 (exception groups)
|
| 5 |
+
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
|
| 6 |
+
Requires-Python: >=3.7
|
| 7 |
+
Description-Content-Type: text/x-rst
|
| 8 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 9 |
+
Classifier: Intended Audience :: Developers
|
| 10 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 11 |
+
Classifier: Programming Language :: Python
|
| 12 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 13 |
+
Classifier: Typing :: Typed
|
| 14 |
+
Requires-Dist: pytest >= 6 ; extra == "test"
|
| 15 |
+
Project-URL: Changelog, https://github.com/agronholm/exceptiongroup/blob/main/CHANGES.rst
|
| 16 |
+
Project-URL: Issue Tracker, https://github.com/agronholm/exceptiongroup/issues
|
| 17 |
+
Project-URL: Source code, https://github.com/agronholm/exceptiongroup
|
| 18 |
+
Provides-Extra: test
|
| 19 |
+
|
| 20 |
+
.. image:: https://github.com/agronholm/exceptiongroup/actions/workflows/test.yml/badge.svg
|
| 21 |
+
:target: https://github.com/agronholm/exceptiongroup/actions/workflows/test.yml
|
| 22 |
+
:alt: Build Status
|
| 23 |
+
.. image:: https://coveralls.io/repos/github/agronholm/exceptiongroup/badge.svg?branch=main
|
| 24 |
+
:target: https://coveralls.io/github/agronholm/exceptiongroup?branch=main
|
| 25 |
+
:alt: Code Coverage
|
| 26 |
+
|
| 27 |
+
This is a backport of the ``BaseExceptionGroup`` and ``ExceptionGroup`` classes from
|
| 28 |
+
Python 3.11.
|
| 29 |
+
|
| 30 |
+
It contains the following:
|
| 31 |
+
|
| 32 |
+
* The ``exceptiongroup.BaseExceptionGroup`` and ``exceptiongroup.ExceptionGroup``
|
| 33 |
+
classes
|
| 34 |
+
* A utility function (``exceptiongroup.catch()``) for catching exceptions possibly
|
| 35 |
+
nested in an exception group
|
| 36 |
+
* Patches to the ``TracebackException`` class that properly formats exception groups
|
| 37 |
+
(installed on import)
|
| 38 |
+
* An exception hook that handles formatting of exception groups through
|
| 39 |
+
``TracebackException`` (installed on import)
|
| 40 |
+
* Special versions of some of the functions from the ``traceback`` module, modified to
|
| 41 |
+
correctly handle exception groups even when monkey patching is disabled, or blocked by
|
| 42 |
+
another custom exception hook:
|
| 43 |
+
|
| 44 |
+
* ``traceback.format_exception()``
|
| 45 |
+
* ``traceback.format_exception_only()``
|
| 46 |
+
* ``traceback.print_exception()``
|
| 47 |
+
* ``traceback.print_exc()``
|
| 48 |
+
* A backported version of ``contextlib.suppress()`` from Python 3.12.1 which also
|
| 49 |
+
handles suppressing exceptions inside exception groups
|
| 50 |
+
|
| 51 |
+
If this package is imported on Python 3.11 or later, the built-in implementations of the
|
| 52 |
+
exception group classes are used instead, ``TracebackException`` is not monkey patched
|
| 53 |
+
and the exception hook won't be installed.
|
| 54 |
+
|
| 55 |
+
See the `standard library documentation`_ for more information on exception groups.
|
| 56 |
+
|
| 57 |
+
.. _standard library documentation: https://docs.python.org/3/library/exceptions.html
|
| 58 |
+
|
| 59 |
+
Catching exceptions
|
| 60 |
+
===================
|
| 61 |
+
|
| 62 |
+
Due to the lack of the ``except*`` syntax introduced by `PEP 654`_ in earlier Python
|
| 63 |
+
versions, you need to use ``exceptiongroup.catch()`` to catch exceptions that are
|
| 64 |
+
potentially nested inside an exception group. This function returns a context manager
|
| 65 |
+
that calls the given handler for any exceptions matching the sole argument.
|
| 66 |
+
|
| 67 |
+
The argument to ``catch()`` must be a dict (or any ``Mapping``) where each key is either
|
| 68 |
+
an exception class or an iterable of exception classes. Each value must be a callable
|
| 69 |
+
that takes a single positional argument. The handler will be called at most once, with
|
| 70 |
+
an exception group as an argument which will contain all the exceptions that are any
|
| 71 |
+
of the given types, or their subclasses. The exception group may contain nested groups
|
| 72 |
+
containing more matching exceptions.
|
| 73 |
+
|
| 74 |
+
Thus, the following Python 3.11+ code:
|
| 75 |
+
|
| 76 |
+
.. code-block:: python
|
| 77 |
+
|
| 78 |
+
try:
|
| 79 |
+
...
|
| 80 |
+
except* (ValueError, KeyError) as excgroup:
|
| 81 |
+
for exc in excgroup.exceptions:
|
| 82 |
+
print('Caught exception:', type(exc))
|
| 83 |
+
except* RuntimeError:
|
| 84 |
+
print('Caught runtime error')
|
| 85 |
+
|
| 86 |
+
would be written with this backport like this:
|
| 87 |
+
|
| 88 |
+
.. code-block:: python
|
| 89 |
+
|
| 90 |
+
from exceptiongroup import BaseExceptionGroup, catch
|
| 91 |
+
|
| 92 |
+
def value_key_err_handler(excgroup: BaseExceptionGroup) -> None:
|
| 93 |
+
for exc in excgroup.exceptions:
|
| 94 |
+
print('Caught exception:', type(exc))
|
| 95 |
+
|
| 96 |
+
def runtime_err_handler(exc: BaseExceptionGroup) -> None:
|
| 97 |
+
print('Caught runtime error')
|
| 98 |
+
|
| 99 |
+
with catch({
|
| 100 |
+
(ValueError, KeyError): value_key_err_handler,
|
| 101 |
+
RuntimeError: runtime_err_handler
|
| 102 |
+
}):
|
| 103 |
+
...
|
| 104 |
+
|
| 105 |
+
**NOTE**: Just like with ``except*``, you cannot handle ``BaseExceptionGroup`` or
|
| 106 |
+
``ExceptionGroup`` with ``catch()``.
|
| 107 |
+
|
| 108 |
+
Suppressing exceptions
|
| 109 |
+
======================
|
| 110 |
+
|
| 111 |
+
This library contains a backport of the ``contextlib.suppress()`` context manager from
|
| 112 |
+
Python 3.12.1. It allows you to selectively ignore certain exceptions, even when they're
|
| 113 |
+
inside exception groups:
|
| 114 |
+
|
| 115 |
+
.. code-block:: python
|
| 116 |
+
|
| 117 |
+
from exceptiongroup import suppress
|
| 118 |
+
|
| 119 |
+
with suppress(RuntimeError):
|
| 120 |
+
raise ExceptionGroup("", [RuntimeError("boo")])
|
| 121 |
+
|
| 122 |
+
Notes on monkey patching
|
| 123 |
+
========================
|
| 124 |
+
|
| 125 |
+
To make exception groups render properly when an unhandled exception group is being
|
| 126 |
+
printed out, this package does two things when it is imported on any Python version
|
| 127 |
+
earlier than 3.11:
|
| 128 |
+
|
| 129 |
+
#. The ``traceback.TracebackException`` class is monkey patched to store extra
|
| 130 |
+
information about exception groups (in ``__init__()``) and properly format them (in
|
| 131 |
+
``format()``)
|
| 132 |
+
#. An exception hook is installed at ``sys.excepthook``, provided that no other hook is
|
| 133 |
+
already present. This hook causes the exception to be formatted using
|
| 134 |
+
``traceback.TracebackException`` rather than the built-in rendered.
|
| 135 |
+
|
| 136 |
+
If ``sys.exceptionhook`` is found to be set to something else than the default when
|
| 137 |
+
``exceptiongroup`` is imported, no monkeypatching is done at all.
|
| 138 |
+
|
| 139 |
+
To prevent the exception hook and patches from being installed, set the environment
|
| 140 |
+
variable ``EXCEPTIONGROUP_NO_PATCH`` to ``1``.
|
| 141 |
+
|
| 142 |
+
Formatting exception groups
|
| 143 |
+
---------------------------
|
| 144 |
+
|
| 145 |
+
Normally, the monkey patching applied by this library on import will cause exception
|
| 146 |
+
groups to be printed properly in tracebacks. But in cases when the monkey patching is
|
| 147 |
+
blocked by a third party exception hook, or monkey patching is explicitly disabled,
|
| 148 |
+
you can still manually format exceptions using the special versions of the ``traceback``
|
| 149 |
+
functions, like ``format_exception()``, listed at the top of this page. They work just
|
| 150 |
+
like their counterparts in the ``traceback`` module, except that they use a separately
|
| 151 |
+
patched subclass of ``TracebackException`` to perform the rendering.
|
| 152 |
+
|
| 153 |
+
Particularly in cases where a library installs its own exception hook, it is recommended
|
| 154 |
+
to use these special versions to do the actual formatting of exceptions/tracebacks.
|
| 155 |
+
|
| 156 |
+
.. _PEP 654: https://www.python.org/dev/peps/pep-0654/
|
| 157 |
+
|
evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_cambrian/lib/python3.10/site-packages/exceptiongroup-1.2.2.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: flit 3.9.0
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/AUTHORS
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Tri Dao, trid@cs.stanford.edu
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
BSD 3-Clause License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
|
| 4 |
+
All rights reserved.
|
| 5 |
+
|
| 6 |
+
Redistribution and use in source and binary forms, with or without
|
| 7 |
+
modification, are permitted provided that the following conditions are met:
|
| 8 |
+
|
| 9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
| 10 |
+
list of conditions and the following disclaimer.
|
| 11 |
+
|
| 12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
| 13 |
+
this list of conditions and the following disclaimer in the documentation
|
| 14 |
+
and/or other materials provided with the distribution.
|
| 15 |
+
|
| 16 |
+
* Neither the name of the copyright holder nor the names of its
|
| 17 |
+
contributors may be used to endorse or promote products derived from
|
| 18 |
+
this software without specific prior written permission.
|
| 19 |
+
|
| 20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/METADATA
ADDED
|
@@ -0,0 +1,430 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: flash-attn
|
| 3 |
+
Version: 2.5.8
|
| 4 |
+
Summary: Flash Attention: Fast and Memory-Efficient Exact Attention
|
| 5 |
+
Home-page: https://github.com/Dao-AILab/flash-attention
|
| 6 |
+
Author: Tri Dao
|
| 7 |
+
Author-email: trid@cs.stanford.edu
|
| 8 |
+
Classifier: Programming Language :: Python :: 3
|
| 9 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 10 |
+
Classifier: Operating System :: Unix
|
| 11 |
+
Requires-Python: >=3.7
|
| 12 |
+
Description-Content-Type: text/markdown
|
| 13 |
+
License-File: LICENSE
|
| 14 |
+
License-File: AUTHORS
|
| 15 |
+
Requires-Dist: torch
|
| 16 |
+
Requires-Dist: einops
|
| 17 |
+
Requires-Dist: packaging
|
| 18 |
+
Requires-Dist: ninja
|
| 19 |
+
|
| 20 |
+
# FlashAttention
|
| 21 |
+
This repository provides the official implementation of FlashAttention and
|
| 22 |
+
FlashAttention-2 from the
|
| 23 |
+
following papers.
|
| 24 |
+
|
| 25 |
+
**FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness**
|
| 26 |
+
Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, Christopher Ré
|
| 27 |
+
Paper: https://arxiv.org/abs/2205.14135
|
| 28 |
+
IEEE Spectrum [article](https://spectrum.ieee.org/mlperf-rankings-2022) about our submission to the MLPerf 2.0 benchmark using FlashAttention.
|
| 29 |
+

|
| 30 |
+
|
| 31 |
+
**FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning**
|
| 32 |
+
Tri Dao
|
| 33 |
+
|
| 34 |
+
Paper: https://tridao.me/publications/flash2/flash2.pdf
|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
## Usage
|
| 40 |
+
|
| 41 |
+
We've been very happy to see FlashAttention being widely adopted in such a short
|
| 42 |
+
time after its release. This [page](https://github.com/Dao-AILab/flash-attention/blob/main/usage.md)
|
| 43 |
+
contains a partial list of places where FlashAttention is being used.
|
| 44 |
+
|
| 45 |
+
FlashAttention and FlashAttention-2 are free to use and modify (see LICENSE).
|
| 46 |
+
Please cite and credit FlashAttention if you use it.
|
| 47 |
+
|
| 48 |
+
## Installation and features
|
| 49 |
+
|
| 50 |
+
Requirements:
|
| 51 |
+
- CUDA 11.6 and above.
|
| 52 |
+
- PyTorch 1.12 and above.
|
| 53 |
+
- Linux. Might work for Windows starting v2.3.2 (we've seen a few positive [reports](https://github.com/Dao-AILab/flash-attention/issues/595)) but Windows compilation still requires more testing. If you have ideas on how to set up prebuilt CUDA wheels for Windows, please reach out via Github issue.
|
| 54 |
+
|
| 55 |
+
We recommend the
|
| 56 |
+
[Pytorch](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
|
| 57 |
+
container from Nvidia, which has all the required tools to install FlashAttention.
|
| 58 |
+
|
| 59 |
+
To install:
|
| 60 |
+
1. Make sure that PyTorch is installed.
|
| 61 |
+
2. Make sure that `packaging` is installed (`pip install packaging`)
|
| 62 |
+
3. Make sure that `ninja` is installed and that it works correctly (e.g. `ninja
|
| 63 |
+
--version` then `echo $?` should return exit code 0). If not (sometimes `ninja
|
| 64 |
+
--version` then `echo $?` returns a nonzero exit code), uninstall then reinstall
|
| 65 |
+
`ninja` (`pip uninstall -y ninja && pip install ninja`). Without `ninja`,
|
| 66 |
+
compiling can take a very long time (2h) since it does not use multiple CPU
|
| 67 |
+
cores. With `ninja` compiling takes 3-5 minutes on a 64-core machine.
|
| 68 |
+
4. Then:
|
| 69 |
+
```sh
|
| 70 |
+
pip install flash-attn --no-build-isolation
|
| 71 |
+
```
|
| 72 |
+
Alternatively you can compile from source:
|
| 73 |
+
```sh
|
| 74 |
+
python setup.py install
|
| 75 |
+
```
|
| 76 |
+
|
| 77 |
+
If your machine has less than 96GB of RAM and lots of CPU cores, `ninja` might
|
| 78 |
+
run too many parallel compilation jobs that could exhaust the amount of RAM. To
|
| 79 |
+
limit the number of parallel compilation jobs, you can set the environment
|
| 80 |
+
variable `MAX_JOBS`:
|
| 81 |
+
```sh
|
| 82 |
+
MAX_JOBS=4 pip install flash-attn --no-build-isolation
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
Interface: `src/flash_attention_interface.py`
|
| 86 |
+
|
| 87 |
+
FlashAttention-2 currently supports:
|
| 88 |
+
1. Ampere, Ada, or Hopper GPUs (e.g., A100, RTX 3090, RTX 4090, H100). Support for Turing
|
| 89 |
+
GPUs (T4, RTX 2080) is coming soon, please use FlashAttention 1.x for Turing
|
| 90 |
+
GPUs for now.
|
| 91 |
+
2. Datatype fp16 and bf16 (bf16 requires Ampere, Ada, or Hopper GPUs).
|
| 92 |
+
3. All head dimensions up to 256. ~~Head dim > 192 backward requires A100/A800 or H100/H800~~. Head dim 256 backward now works on consumer GPUs (if there's no dropout) as of flash-attn 2.5.5.
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
## How to use FlashAttention
|
| 96 |
+
|
| 97 |
+
The main functions implement scaled dot product attention (softmax(Q @ K^T *
|
| 98 |
+
softmax_scale) @ V):
|
| 99 |
+
```python
|
| 100 |
+
from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
```python
|
| 104 |
+
flash_attn_qkvpacked_func(qkv, dropout_p=0.0, softmax_scale=None, causal=False,
|
| 105 |
+
window_size=(-1, -1), alibi_slopes=None, deterministic=False):
|
| 106 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 107 |
+
If Q, K, V are already stacked into 1 tensor, this function will be faster than
|
| 108 |
+
calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
|
| 109 |
+
of the gradients of Q, K, V.
|
| 110 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 111 |
+
will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
|
| 112 |
+
Arguments:
|
| 113 |
+
qkv: (batch_size, seqlen, 3, nheads, headdim)
|
| 114 |
+
dropout_p: float. Dropout probability.
|
| 115 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 116 |
+
Default to 1 / sqrt(headdim).
|
| 117 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 118 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 119 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
|
| 120 |
+
the attention score of query i and key j.
|
| 121 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 122 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 123 |
+
Return:
|
| 124 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 125 |
+
"""
|
| 126 |
+
```
|
| 127 |
+
|
| 128 |
+
```python
|
| 129 |
+
flash_attn_func(q, k, v, dropout_p=0.0, softmax_scale=None, causal=False,
|
| 130 |
+
window_size=(-1, -1), alibi_slopes=None, deterministic=False):
|
| 131 |
+
"""dropout_p should be set to 0.0 during evaluation
|
| 132 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 133 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 134 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
|
| 135 |
+
0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.
|
| 136 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 137 |
+
will only attend to keys between
|
| 138 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 139 |
+
|
| 140 |
+
Arguments:
|
| 141 |
+
q: (batch_size, seqlen, nheads, headdim)
|
| 142 |
+
k: (batch_size, seqlen, nheads_k, headdim)
|
| 143 |
+
v: (batch_size, seqlen, nheads_k, headdim)
|
| 144 |
+
dropout_p: float. Dropout probability.
|
| 145 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 146 |
+
Default to 1 / sqrt(headdim).
|
| 147 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 148 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 149 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 150 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 151 |
+
is added to the attention score of query i and key j.
|
| 152 |
+
deterministic: bool. Whether to use the deterministic implementation of the backward pass,
|
| 153 |
+
which is slightly slower and uses more memory. The forward pass is always deterministic.
|
| 154 |
+
Return:
|
| 155 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 156 |
+
"""
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
```python
|
| 160 |
+
def flash_attn_with_kvcache(
|
| 161 |
+
q,
|
| 162 |
+
k_cache,
|
| 163 |
+
v_cache,
|
| 164 |
+
k=None,
|
| 165 |
+
v=None,
|
| 166 |
+
rotary_cos=None,
|
| 167 |
+
rotary_sin=None,
|
| 168 |
+
cache_seqlens: Optional[Union[(int, torch.Tensor)]] = None,
|
| 169 |
+
cache_batch_idx: Optional[torch.Tensor] = None,
|
| 170 |
+
block_table: Optional[torch.Tensor] = None,
|
| 171 |
+
softmax_scale=None,
|
| 172 |
+
causal=False,
|
| 173 |
+
window_size=(-1, -1), # -1 means infinite context window
|
| 174 |
+
rotary_interleaved=True,
|
| 175 |
+
alibi_slopes=None,
|
| 176 |
+
):
|
| 177 |
+
"""
|
| 178 |
+
If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
|
| 179 |
+
k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
|
| 180 |
+
the previous step, and update them with the new keys/values from the current step, and do
|
| 181 |
+
attention with the updated cache, all in 1 kernel.
|
| 182 |
+
|
| 183 |
+
If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
|
| 184 |
+
For example, the KV cache could be pre-allocated with the max sequence length, and you can use
|
| 185 |
+
cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
|
| 186 |
+
|
| 187 |
+
Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
|
| 188 |
+
rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
|
| 189 |
+
If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
|
| 190 |
+
and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
|
| 191 |
+
If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
|
| 192 |
+
indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
|
| 193 |
+
|
| 194 |
+
See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
|
| 195 |
+
|
| 196 |
+
Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
|
| 197 |
+
than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
|
| 198 |
+
For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attention to head
|
| 199 |
+
0 of K, V, and head 3, 4, 5 of Q will attention to head 1 of K, V.
|
| 200 |
+
|
| 201 |
+
If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
|
| 202 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
|
| 203 |
+
1 1 1 1 0
|
| 204 |
+
1 1 1 1 1
|
| 205 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 206 |
+
0 0
|
| 207 |
+
0 0
|
| 208 |
+
0 0
|
| 209 |
+
1 0
|
| 210 |
+
1 1
|
| 211 |
+
If the row of the mask is all zero, the output will be zero.
|
| 212 |
+
|
| 213 |
+
If window_size != (-1, -1), implements sliding window local attention. Query at position i
|
| 214 |
+
will only attend to keys between
|
| 215 |
+
[i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
|
| 216 |
+
|
| 217 |
+
Note: Does not support backward pass.
|
| 218 |
+
|
| 219 |
+
Arguments:
|
| 220 |
+
q: (batch_size, seqlen, nheads, headdim)
|
| 221 |
+
k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
|
| 222 |
+
or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
|
| 223 |
+
page_block_size must be a multiple of 256.
|
| 224 |
+
v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
|
| 225 |
+
or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
|
| 226 |
+
k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
|
| 227 |
+
k with k_cache, starting at the indices specified by cache_seqlens.
|
| 228 |
+
v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
|
| 229 |
+
rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
|
| 230 |
+
to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
|
| 231 |
+
rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
|
| 232 |
+
cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
|
| 233 |
+
KV cache.
|
| 234 |
+
block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
|
| 235 |
+
cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
|
| 236 |
+
If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
|
| 237 |
+
If the indices are not distinct, and k and v are provided, the values updated in the cache
|
| 238 |
+
might come from any of the duplicate indices.
|
| 239 |
+
softmax_scale: float. The scaling of QK^T before applying softmax.
|
| 240 |
+
Default to 1 / sqrt(headdim).
|
| 241 |
+
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
|
| 242 |
+
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
|
| 243 |
+
rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
|
| 244 |
+
If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
|
| 245 |
+
rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
|
| 246 |
+
(i.e. GPT-NeoX style).
|
| 247 |
+
alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
|
| 248 |
+
(-alibi_slope * |i + seqlen_k - seqlen_q - j|)
|
| 249 |
+
is added to the attention score of query i and key j.
|
| 250 |
+
|
| 251 |
+
Return:
|
| 252 |
+
out: (batch_size, seqlen, nheads, headdim).
|
| 253 |
+
"""
|
| 254 |
+
```
|
| 255 |
+
|
| 256 |
+
To see how these functions are used in a multi-head attention layer (which
|
| 257 |
+
includes QKV projection, output projection), see the MHA [implementation](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py).
|
| 258 |
+
|
| 259 |
+
## Changelog
|
| 260 |
+
|
| 261 |
+
### 2.0: Complete rewrite, 2x faster
|
| 262 |
+
Upgrading from FlashAttention (1.x) to FlashAttention-2
|
| 263 |
+
|
| 264 |
+
These functions have been renamed:
|
| 265 |
+
- `flash_attn_unpadded_func` -> `flash_attn_varlen_func`
|
| 266 |
+
- `flash_attn_unpadded_qkvpacked_func` -> `flash_attn_varlen_qkvpacked_func`
|
| 267 |
+
- `flash_attn_unpadded_kvpacked_func` -> `flash_attn_varlen_kvpacked_func`
|
| 268 |
+
|
| 269 |
+
If the inputs have the same sequence lengths in the same batch, it is simpler
|
| 270 |
+
and faster to use these functions:
|
| 271 |
+
```python
|
| 272 |
+
flash_attn_qkvpacked_func(qkv, dropout_p=0.0, softmax_scale=None, causal=False)
|
| 273 |
+
```
|
| 274 |
+
```python
|
| 275 |
+
flash_attn_func(q, k, v, dropout_p=0.0, softmax_scale=None, causal=False)
|
| 276 |
+
```
|
| 277 |
+
### 2.1: Change behavior of causal flag
|
| 278 |
+
|
| 279 |
+
If seqlen_q != seqlen_k and causal=True, the causal mask is aligned to the
|
| 280 |
+
bottom right corner of the attention matrix, instead of the top-left corner.
|
| 281 |
+
|
| 282 |
+
For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 =
|
| 283 |
+
masked out) is:
|
| 284 |
+
v2.0:
|
| 285 |
+
1 0 0 0 0
|
| 286 |
+
1 1 0 0 0
|
| 287 |
+
v2.1:
|
| 288 |
+
1 1 1 1 0
|
| 289 |
+
1 1 1 1 1
|
| 290 |
+
|
| 291 |
+
If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
|
| 292 |
+
v2.0:
|
| 293 |
+
1 0
|
| 294 |
+
1 1
|
| 295 |
+
1 1
|
| 296 |
+
1 1
|
| 297 |
+
1 1
|
| 298 |
+
v2.1:
|
| 299 |
+
0 0
|
| 300 |
+
0 0
|
| 301 |
+
0 0
|
| 302 |
+
1 0
|
| 303 |
+
1 1
|
| 304 |
+
If the row of the mask is all zero, the output will be zero.
|
| 305 |
+
|
| 306 |
+
### 2.2: Optimize for inference
|
| 307 |
+
|
| 308 |
+
Optimize for inference (iterative decoding) when query has very small sequence
|
| 309 |
+
length (e.g., query sequence length = 1). The bottleneck here is to load KV
|
| 310 |
+
cache as fast as possible, and we split the loading across different thread
|
| 311 |
+
blocks, with a separate kernel to combine results.
|
| 312 |
+
|
| 313 |
+
See the function `flash_attn_with_kvcache` with more features for inference
|
| 314 |
+
(perform rotary embedding, updating KV cache inplace).
|
| 315 |
+
|
| 316 |
+
Thanks to the xformers team, and in particular Daniel Haziza, for this
|
| 317 |
+
collaboration.
|
| 318 |
+
|
| 319 |
+
### 2.3: Local (i.e., sliding window) attention
|
| 320 |
+
|
| 321 |
+
Implement sliding window attention (i.e., local attention). Thanks to [Mistral
|
| 322 |
+
AI](https://mistral.ai/) and in particular Timothée Lacroix for this
|
| 323 |
+
contribution. Sliding window was used in the [Mistral 7B](https://mistral.ai/news/announcing-mistral-7b/) model.
|
| 324 |
+
|
| 325 |
+
### 2.4: ALiBi (attention with linear bias), deterministic backward pass.
|
| 326 |
+
|
| 327 |
+
Implement ALiBi (Press et al., 2021). Thanks to Sanghun Cho from Kakao Brain for this contribution.
|
| 328 |
+
|
| 329 |
+
Implement deterministic backward pass. Thanks to engineers from [Meituan](www.meituan.com) for this contribution.
|
| 330 |
+
|
| 331 |
+
### 2.5: Paged KV cache.
|
| 332 |
+
|
| 333 |
+
Support paged KV cache (i.e., [PagedAttention](https://arxiv.org/abs/2309.06180)).
|
| 334 |
+
Thanks to @beginlner for this contribution.
|
| 335 |
+
|
| 336 |
+
## Performance
|
| 337 |
+
|
| 338 |
+
We present expected speedup (combined forward + backward pass) and memory savings from using FlashAttention against PyTorch standard attention, depending on sequence length, on different GPUs (speedup depends on memory bandwidth - we see more speedup on slower GPU memory).
|
| 339 |
+
|
| 340 |
+
We currently have benchmarks for these GPUs:
|
| 341 |
+
* [A100](#a100)
|
| 342 |
+
* [H100](#h100)
|
| 343 |
+
<!-- * [RTX 3090](#rtx-3090) -->
|
| 344 |
+
<!-- * [T4](#t4) -->
|
| 345 |
+
|
| 346 |
+
### A100
|
| 347 |
+
|
| 348 |
+
We display FlashAttention speedup using these parameters:
|
| 349 |
+
* Head dimension 64 or 128, hidden dimension 2048 (i.e. either 32 or 16 heads).
|
| 350 |
+
* Sequence length 512, 1k, 2k, 4k, 8k, 16k.
|
| 351 |
+
* Batch size set to 16k / seqlen.
|
| 352 |
+
|
| 353 |
+
#### Speedup
|
| 354 |
+
|
| 355 |
+

|
| 356 |
+
|
| 357 |
+
#### Memory
|
| 358 |
+
|
| 359 |
+

|
| 360 |
+
|
| 361 |
+
We show memory savings in this graph (note that memory footprint is the same no matter if you use dropout or masking).
|
| 362 |
+
Memory savings are proportional to sequence length -- since standard attention has memory quadratic in sequence length, whereas FlashAttention has memory linear in sequence length.
|
| 363 |
+
We see 10X memory savings at sequence length 2K, and 20X at 4K.
|
| 364 |
+
As a result, FlashAttention can scale to much longer sequence lengths.
|
| 365 |
+
|
| 366 |
+
### H100
|
| 367 |
+
|
| 368 |
+

|
| 369 |
+
|
| 370 |
+
## Full model code and training script
|
| 371 |
+
|
| 372 |
+
We have released the full GPT model
|
| 373 |
+
[implementation](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/models/gpt.py).
|
| 374 |
+
We also provide optimized implementations of other layers (e.g., MLP, LayerNorm,
|
| 375 |
+
cross-entropy loss, rotary embedding). Overall this speeds up training by 3-5x
|
| 376 |
+
compared to the baseline implementation from Huggingface, reaching up to 225
|
| 377 |
+
TFLOPs/sec per A100, equivalent to 72% model FLOPs utilization (we don't need
|
| 378 |
+
any activation checkpointing).
|
| 379 |
+
|
| 380 |
+
We also include a training
|
| 381 |
+
[script](https://github.com/Dao-AILab/flash-attention/tree/main/training) to
|
| 382 |
+
train GPT2 on Openwebtext and GPT3 on The Pile.
|
| 383 |
+
|
| 384 |
+
## Triton implementation of FlashAttention
|
| 385 |
+
|
| 386 |
+
Phil Tillet (OpenAI) has an experimental implementation of FlashAttention in Triton:
|
| 387 |
+
https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
|
| 388 |
+
|
| 389 |
+
As Triton is a higher-level language than CUDA, it might be easier to understand
|
| 390 |
+
and experiment with. The notations in the Triton implementation are also closer
|
| 391 |
+
to what's used in our paper.
|
| 392 |
+
|
| 393 |
+
We also have an experimental implementation in Triton that support attention
|
| 394 |
+
bias (e.g. ALiBi):
|
| 395 |
+
https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/flash_attn_triton.py
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
## Tests
|
| 399 |
+
We test that FlashAttention produces the same output and gradient as a reference
|
| 400 |
+
implementation, up to some numerical tolerance. In particular, we check that the
|
| 401 |
+
maximum numerical error of FlashAttention is at most twice the numerical error
|
| 402 |
+
of a baseline implementation in Pytorch (for different head dimensions, input
|
| 403 |
+
dtype, sequence length, causal / non-causal).
|
| 404 |
+
|
| 405 |
+
To run the tests:
|
| 406 |
+
```sh
|
| 407 |
+
pytest -q -s tests/test_flash_attn.py
|
| 408 |
+
```
|
| 409 |
+
## When you encounter issues
|
| 410 |
+
|
| 411 |
+
This new release of FlashAttention-2 has been tested on several GPT-style
|
| 412 |
+
models, mostly on A100 GPUs.
|
| 413 |
+
|
| 414 |
+
If you encounter bugs, please open a GitHub Issue!
|
| 415 |
+
|
| 416 |
+
## Citation
|
| 417 |
+
If you use this codebase, or otherwise found our work valuable, please cite:
|
| 418 |
+
```
|
| 419 |
+
@inproceedings{dao2022flashattention,
|
| 420 |
+
title={Flash{A}ttention: Fast and Memory-Efficient Exact Attention with {IO}-Awareness},
|
| 421 |
+
author={Dao, Tri and Fu, Daniel Y. and Ermon, Stefano and Rudra, Atri and R{\'e}, Christopher},
|
| 422 |
+
booktitle={Advances in Neural Information Processing Systems},
|
| 423 |
+
year={2022}
|
| 424 |
+
}
|
| 425 |
+
@article{dao2023flashattention2,
|
| 426 |
+
title={Flash{A}ttention-2: Faster Attention with Better Parallelism and Work Partitioning},
|
| 427 |
+
author={Dao, Tri},
|
| 428 |
+
year={2023}
|
| 429 |
+
}
|
| 430 |
+
```
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/RECORD
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flash_attn-2.5.8.dist-info/AUTHORS,sha256=879BRIJqYoQbf5rrxQV_ddotMqZSpXPtxnJQ7JSjd6c,29
|
| 2 |
+
flash_attn-2.5.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
flash_attn-2.5.8.dist-info/LICENSE,sha256=jJzLlsBl5wYTW2y60nm3IdphVuUfOl8nxrMymvlBbXM,1558
|
| 4 |
+
flash_attn-2.5.8.dist-info/METADATA,sha256=yIxPctqeijdJpCiTDjKObr0cFOhZgKeYpZge5bYcSDQ,19145
|
| 5 |
+
flash_attn-2.5.8.dist-info/RECORD,,
|
| 6 |
+
flash_attn-2.5.8.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
flash_attn-2.5.8.dist-info/WHEEL,sha256=z7jxKkYRKKJ7SfPfMKwPqpTzAxGIqIt5_V74YUShghE,105
|
| 8 |
+
flash_attn-2.5.8.dist-info/top_level.txt,sha256=M0iiwJuMya9VMy0DnzgZYGe6v-YSy4TY5LNv2xY3fVQ,29
|
| 9 |
+
flash_attn/__init__.py,sha256=HYG7EZzmXTRbMruX5fOSjCtAxYX17GqxXZWwk0MfLJw,285
|
| 10 |
+
flash_attn/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
flash_attn/__pycache__/bert_padding.cpython-310.pyc,,
|
| 12 |
+
flash_attn/__pycache__/flash_attn_interface.cpython-310.pyc,,
|
| 13 |
+
flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc,,
|
| 14 |
+
flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc,,
|
| 15 |
+
flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc,,
|
| 16 |
+
flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc,,
|
| 17 |
+
flash_attn/__pycache__/fused_softmax.cpython-310.pyc,,
|
| 18 |
+
flash_attn/bert_padding.py,sha256=MYMu_Dg9AcnM4-D56X0QGxp5WieqJ045RAvC4kPFI5w,9535
|
| 19 |
+
flash_attn/flash_attn_interface.py,sha256=UUFvZoXnIvsP9PCpDQ0tMUu0b4f2q3KNCEliDGs63ac,45333
|
| 20 |
+
flash_attn/flash_attn_triton.py,sha256=Du81zbh8Ls70ExEsm00opziGvjGFfcZCoZDUO2zut9Q,41112
|
| 21 |
+
flash_attn/flash_attn_triton_og.py,sha256=LmvDju7LJG-wOYhoR6Zc2AmdPK2oWyB1VJpMjRhnWnE,11328
|
| 22 |
+
flash_attn/flash_blocksparse_attention.py,sha256=aJlttNZVxVaktCNYAfP5AdqeZDu8jv42_ZbTkRnDkWg,7469
|
| 23 |
+
flash_attn/flash_blocksparse_attn_interface.py,sha256=2qK2KvVCt851_j8ZzHvjS-aMfdgVDu1yne67-iScWfo,7265
|
| 24 |
+
flash_attn/fused_softmax.py,sha256=0-XbXo7R1a5h4-EpUzPy--lwlGytfTDW34WGM5nmBAY,7793
|
| 25 |
+
flash_attn/layers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 26 |
+
flash_attn/layers/__pycache__/__init__.cpython-310.pyc,,
|
| 27 |
+
flash_attn/layers/__pycache__/patch_embed.cpython-310.pyc,,
|
| 28 |
+
flash_attn/layers/__pycache__/rotary.cpython-310.pyc,,
|
| 29 |
+
flash_attn/layers/patch_embed.py,sha256=H58CgME_qSOPTZLOG08wFgrQS1j34pvNwMPrkTj3Ek4,2136
|
| 30 |
+
flash_attn/layers/rotary.py,sha256=RmDtuIpbFY-dqLATKwaPTjuVswcGJgL21_LvHwn2uw8,18874
|
| 31 |
+
flash_attn/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 32 |
+
flash_attn/losses/__pycache__/__init__.cpython-310.pyc,,
|
| 33 |
+
flash_attn/losses/__pycache__/cross_entropy.cpython-310.pyc,,
|
| 34 |
+
flash_attn/losses/cross_entropy.py,sha256=ieTwDKIWtBGmn8utXePU-YFL3LltRIjK9F9juqPdTIQ,3130
|
| 35 |
+
flash_attn/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 36 |
+
flash_attn/models/__pycache__/__init__.cpython-310.pyc,,
|
| 37 |
+
flash_attn/models/__pycache__/baichuan.cpython-310.pyc,,
|
| 38 |
+
flash_attn/models/__pycache__/bert.cpython-310.pyc,,
|
| 39 |
+
flash_attn/models/__pycache__/bigcode.cpython-310.pyc,,
|
| 40 |
+
flash_attn/models/__pycache__/btlm.cpython-310.pyc,,
|
| 41 |
+
flash_attn/models/__pycache__/falcon.cpython-310.pyc,,
|
| 42 |
+
flash_attn/models/__pycache__/gpt.cpython-310.pyc,,
|
| 43 |
+
flash_attn/models/__pycache__/gpt_neox.cpython-310.pyc,,
|
| 44 |
+
flash_attn/models/__pycache__/gptj.cpython-310.pyc,,
|
| 45 |
+
flash_attn/models/__pycache__/llama.cpython-310.pyc,,
|
| 46 |
+
flash_attn/models/__pycache__/opt.cpython-310.pyc,,
|
| 47 |
+
flash_attn/models/__pycache__/vit.cpython-310.pyc,,
|
| 48 |
+
flash_attn/models/baichuan.py,sha256=eFNWwoRQ02AIeQP0OoK8pNvYw0dqnHOshLigCQPkAEc,5730
|
| 49 |
+
flash_attn/models/bert.py,sha256=-y6wVYzAfDqWWeO6n-dLapT1scn0lIsadKJKFzn48Vg,33241
|
| 50 |
+
flash_attn/models/bigcode.py,sha256=mkYeItoJtmWVf2wKkUs5oXjwdbTdGSo5eHxi0-1maZ8,9383
|
| 51 |
+
flash_attn/models/btlm.py,sha256=d8YDjYTa2G1DutYu-YuVf15S_Dn6oKn8-HzERoersLA,4631
|
| 52 |
+
flash_attn/models/falcon.py,sha256=mA3wGv1a4zhbrUSlFNVVmTgVjiXc1sFTOi55eYpgSPo,6033
|
| 53 |
+
flash_attn/models/gpt.py,sha256=_Eu0Kh0RQoXUVRSsVZQEKCLD1etHDi7w6Dc0_yrbN3I,47663
|
| 54 |
+
flash_attn/models/gpt_neox.py,sha256=_704a9KQ2PcnID8uMV7yZ4ggjGlh1zZH5gszue6D1bI,5159
|
| 55 |
+
flash_attn/models/gptj.py,sha256=k2eqMNyMbU7CJVM_BHBjlKt0ByFz6ITSETqS1mJa89g,4436
|
| 56 |
+
flash_attn/models/llama.py,sha256=bDRI308iRpeJngZLrQlLTGYAmwYotqzUxnjBMirfn-k,16581
|
| 57 |
+
flash_attn/models/opt.py,sha256=L0ZIWKpSP44lcEbiVCzVT9un_5gFMAW6cvnS3KHcb-A,5164
|
| 58 |
+
flash_attn/models/vit.py,sha256=7i0WUI_jZvQ5TMoSKPPzf77ZcyMDfDJuQaINzXN_iQU,14074
|
| 59 |
+
flash_attn/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 60 |
+
flash_attn/modules/__pycache__/__init__.cpython-310.pyc,,
|
| 61 |
+
flash_attn/modules/__pycache__/block.cpython-310.pyc,,
|
| 62 |
+
flash_attn/modules/__pycache__/embedding.cpython-310.pyc,,
|
| 63 |
+
flash_attn/modules/__pycache__/mha.cpython-310.pyc,,
|
| 64 |
+
flash_attn/modules/__pycache__/mlp.cpython-310.pyc,,
|
| 65 |
+
flash_attn/modules/block.py,sha256=WLi7JKj9_Zpk89ppzC7WTIoykJJ7TLOJbUSZePNnW1E,17349
|
| 66 |
+
flash_attn/modules/embedding.py,sha256=RCVeeiomlGNkLeQD8G6Udvex-NDI_xKD45hXjgZ2lbQ,8693
|
| 67 |
+
flash_attn/modules/mha.py,sha256=sVH2TdXO-P1loN7-TaPEcqdBHmR8nLB7SFDM9upkQ9w,43295
|
| 68 |
+
flash_attn/modules/mlp.py,sha256=G6KPQagfKq1DRn7hQRJ3OHznFJLZHj_PiidZE_zcLgg,6033
|
| 69 |
+
flash_attn/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 70 |
+
flash_attn/ops/__pycache__/__init__.cpython-310.pyc,,
|
| 71 |
+
flash_attn/ops/__pycache__/activations.cpython-310.pyc,,
|
| 72 |
+
flash_attn/ops/__pycache__/fused_dense.cpython-310.pyc,,
|
| 73 |
+
flash_attn/ops/__pycache__/layer_norm.cpython-310.pyc,,
|
| 74 |
+
flash_attn/ops/__pycache__/rms_norm.cpython-310.pyc,,
|
| 75 |
+
flash_attn/ops/activations.py,sha256=4f9iruZ2SKJSmOlNQ9L3t5EpQ2tKJVlyy-iBBF6sMgs,3936
|
| 76 |
+
flash_attn/ops/fused_dense.py,sha256=ACJKqkIfxZibxI3nb5ycb3pXBKaL_CM63rUUyQYNAUE,27907
|
| 77 |
+
flash_attn/ops/layer_norm.py,sha256=zr7NXIm-2mtEynTp1CS0fbFGI2Mqdp41dY4AfDWF6EQ,22443
|
| 78 |
+
flash_attn/ops/rms_norm.py,sha256=XEnihcj0a4aSz4LO55m5iKGVn4HKTeKN8TIyHjuDgxI,3988
|
| 79 |
+
flash_attn/ops/triton/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
|
| 80 |
+
flash_attn/ops/triton/__pycache__/__init__.cpython-310.pyc,,
|
| 81 |
+
flash_attn/ops/triton/__pycache__/cross_entropy.cpython-310.pyc,,
|
| 82 |
+
flash_attn/ops/triton/__pycache__/k_activations.cpython-310.pyc,,
|
| 83 |
+
flash_attn/ops/triton/__pycache__/layer_norm.cpython-310.pyc,,
|
| 84 |
+
flash_attn/ops/triton/__pycache__/linear.cpython-310.pyc,,
|
| 85 |
+
flash_attn/ops/triton/__pycache__/mlp.cpython-310.pyc,,
|
| 86 |
+
flash_attn/ops/triton/__pycache__/rotary.cpython-310.pyc,,
|
| 87 |
+
flash_attn/ops/triton/cross_entropy.py,sha256=MmkqIlk8TUyngD-uzAM2-ENSXWqYQmDJxID3NIcWmz8,12531
|
| 88 |
+
flash_attn/ops/triton/k_activations.py,sha256=-Z3vIyO4JkqBMipKsPvhzmxljtBdIhJCsl_M-_ESqBo,4034
|
| 89 |
+
flash_attn/ops/triton/layer_norm.py,sha256=M96U0lHhglwC2tmZ0y_LOfAVhM0XdToo8TK_vb1hJZA,35020
|
| 90 |
+
flash_attn/ops/triton/linear.py,sha256=OtRvKz8xdpl-7v3q_ZTaS9fdBt9XrzMyapgRr50uBbM,20841
|
| 91 |
+
flash_attn/ops/triton/mlp.py,sha256=_5lbZJFZg_pXeXYITGt4V_6LkB_yddClB_jt-diCOdw,6068
|
| 92 |
+
flash_attn/ops/triton/rotary.py,sha256=-uMWkh2DhbhZYRY2PeMWZ8x3lDbuYp0ljsgHYYP3NYk,8582
|
| 93 |
+
flash_attn/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 94 |
+
flash_attn/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 95 |
+
flash_attn/utils/__pycache__/benchmark.cpython-310.pyc,,
|
| 96 |
+
flash_attn/utils/__pycache__/distributed.cpython-310.pyc,,
|
| 97 |
+
flash_attn/utils/__pycache__/generation.cpython-310.pyc,,
|
| 98 |
+
flash_attn/utils/__pycache__/pretrained.cpython-310.pyc,,
|
| 99 |
+
flash_attn/utils/benchmark.py,sha256=JDtzdVhFyMIQqs3edbcXdXnmDf-O7RVpmZmn2ZFCvI0,7369
|
| 100 |
+
flash_attn/utils/distributed.py,sha256=qhcybRXtslssuV9LYaQy37haPaPtklM4YUMDx9UvnnQ,5825
|
| 101 |
+
flash_attn/utils/generation.py,sha256=4rh4XRDXN3xCfmPt4dtQz4m3StTIjyCg8L2VNZwdaVo,30466
|
| 102 |
+
flash_attn/utils/pretrained.py,sha256=VZ6qk90sBJA7M86gRzPsNc_CkQXkj5HyrJvwl0I355k,3246
|
| 103 |
+
flash_attn_2_cuda.cpython-310-x86_64-linux-gnu.so,sha256=Vcd8pEDDjr6Hw5wrtuFxdRB8lpcwSAxGRVLofVda4Vo,408288320
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.43.0)
|
| 3 |
+
Root-Is-Purelib: false
|
| 4 |
+
Tag: cp310-cp310-linux_x86_64
|
| 5 |
+
|
evalkit_cambrian/lib/python3.10/site-packages/flash_attn-2.5.8.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flash_attn
|
| 2 |
+
flash_attn_2_cuda
|
evalkit_cambrian/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/RECORD
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
jinja2-3.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
jinja2-3.1.5.dist-info/LICENSE.txt,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
|
| 3 |
+
jinja2-3.1.5.dist-info/METADATA,sha256=PJNSUFNBwoqGA2vce2XSP8M_p2EYqAHYI7hoWLABtFo,2593
|
| 4 |
+
jinja2-3.1.5.dist-info/RECORD,,
|
| 5 |
+
jinja2-3.1.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
jinja2-3.1.5.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
|
| 7 |
+
jinja2-3.1.5.dist-info/entry_points.txt,sha256=OL85gYU1eD8cuPlikifFngXpeBjaxl6rIJ8KkC_3r-I,58
|
| 8 |
+
jinja2/__init__.py,sha256=zpt8UHzpS2eB1c04kn1LkKkaXLXXcKd33klq7UJGIgg,1928
|
| 9 |
+
jinja2/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
jinja2/__pycache__/_identifier.cpython-310.pyc,,
|
| 11 |
+
jinja2/__pycache__/async_utils.cpython-310.pyc,,
|
| 12 |
+
jinja2/__pycache__/bccache.cpython-310.pyc,,
|
| 13 |
+
jinja2/__pycache__/compiler.cpython-310.pyc,,
|
| 14 |
+
jinja2/__pycache__/constants.cpython-310.pyc,,
|
| 15 |
+
jinja2/__pycache__/debug.cpython-310.pyc,,
|
| 16 |
+
jinja2/__pycache__/defaults.cpython-310.pyc,,
|
| 17 |
+
jinja2/__pycache__/environment.cpython-310.pyc,,
|
| 18 |
+
jinja2/__pycache__/exceptions.cpython-310.pyc,,
|
| 19 |
+
jinja2/__pycache__/ext.cpython-310.pyc,,
|
| 20 |
+
jinja2/__pycache__/filters.cpython-310.pyc,,
|
| 21 |
+
jinja2/__pycache__/idtracking.cpython-310.pyc,,
|
| 22 |
+
jinja2/__pycache__/lexer.cpython-310.pyc,,
|
| 23 |
+
jinja2/__pycache__/loaders.cpython-310.pyc,,
|
| 24 |
+
jinja2/__pycache__/meta.cpython-310.pyc,,
|
| 25 |
+
jinja2/__pycache__/nativetypes.cpython-310.pyc,,
|
| 26 |
+
jinja2/__pycache__/nodes.cpython-310.pyc,,
|
| 27 |
+
jinja2/__pycache__/optimizer.cpython-310.pyc,,
|
| 28 |
+
jinja2/__pycache__/parser.cpython-310.pyc,,
|
| 29 |
+
jinja2/__pycache__/runtime.cpython-310.pyc,,
|
| 30 |
+
jinja2/__pycache__/sandbox.cpython-310.pyc,,
|
| 31 |
+
jinja2/__pycache__/tests.cpython-310.pyc,,
|
| 32 |
+
jinja2/__pycache__/utils.cpython-310.pyc,,
|
| 33 |
+
jinja2/__pycache__/visitor.cpython-310.pyc,,
|
| 34 |
+
jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
|
| 35 |
+
jinja2/async_utils.py,sha256=vK-PdsuorOMnWSnEkT3iUJRIkTnYgO2T6MnGxDgHI5o,2834
|
| 36 |
+
jinja2/bccache.py,sha256=gh0qs9rulnXo0PhX5jTJy2UHzI8wFnQ63o_vw7nhzRg,14061
|
| 37 |
+
jinja2/compiler.py,sha256=9RpCQl5X88BHllJiPsHPh295Hh0uApvwFJNQuutULeM,74131
|
| 38 |
+
jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433
|
| 39 |
+
jinja2/debug.py,sha256=CnHqCDHd-BVGvti_8ZsTolnXNhA3ECsY-6n_2pwU8Hw,6297
|
| 40 |
+
jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267
|
| 41 |
+
jinja2/environment.py,sha256=9nhrP7Ch-NbGX00wvyr4yy-uhNHq2OCc60ggGrni_fk,61513
|
| 42 |
+
jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071
|
| 43 |
+
jinja2/ext.py,sha256=5PF5eHfh8mXAIxXHHRB2xXbXohi8pE3nHSOxa66uS7E,31875
|
| 44 |
+
jinja2/filters.py,sha256=cvRI2pqXNMzw8ba41VOBpgu_wu1r-l1_QxwD6yVoJ5g,55025
|
| 45 |
+
jinja2/idtracking.py,sha256=-ll5lIp73pML3ErUYiIJj7tdmWxcH_IlDv3yA_hiZYo,10555
|
| 46 |
+
jinja2/lexer.py,sha256=LYiYio6br-Tep9nPcupWXsPEtjluw3p1mU-lNBVRUfk,29786
|
| 47 |
+
jinja2/loaders.py,sha256=wIrnxjvcbqh5VwW28NSkfotiDq8qNCxIOSFbGUiSLB4,24055
|
| 48 |
+
jinja2/meta.py,sha256=OTDPkaFvU2Hgvx-6akz7154F8BIWaRmvJcBFvwopHww,4397
|
| 49 |
+
jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210
|
| 50 |
+
jinja2/nodes.py,sha256=m1Duzcr6qhZI8JQ6VyJgUNinjAf5bQzijSmDnMsvUx8,34579
|
| 51 |
+
jinja2/optimizer.py,sha256=rJnCRlQ7pZsEEmMhsQDgC_pKyDHxP5TPS6zVPGsgcu8,1651
|
| 52 |
+
jinja2/parser.py,sha256=lLOFy3sEmHc5IaEHRiH1sQVnId2moUQzhyeJZTtdY30,40383
|
| 53 |
+
jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 54 |
+
jinja2/runtime.py,sha256=gDk-GvdriJXqgsGbHgrcKTP0Yp6zPXzhzrIpCFH3jAU,34249
|
| 55 |
+
jinja2/sandbox.py,sha256=Mw2aitlY2I8la7FYhcX2YG9BtUYcLnD0Gh3d29cDWrY,15009
|
| 56 |
+
jinja2/tests.py,sha256=VLsBhVFnWg-PxSBz1MhRnNWgP1ovXk3neO1FLQMeC9Q,5926
|
| 57 |
+
jinja2/utils.py,sha256=rRp3o9e7ZKS4fyrWRbELyLcpuGVTFcnooaOa1qx_FIk,24129
|
| 58 |
+
jinja2/visitor.py,sha256=EcnL1PIwf_4RVCOMxsRNuR8AXHbS1qfAdMOE2ngKJz4,3557
|
evalkit_cambrian/lib/python3.10/site-packages/jinja2-3.1.5.dist-info/entry_points.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[babel.extractors]
|
| 2 |
+
jinja2=jinja2.ext:babel_extract[i18n]
|
| 3 |
+
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2019-09/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2019-09/schema",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2019-09/vocab/core": true,
|
| 6 |
+
"https://json-schema.org/draft/2019-09/vocab/applicator": true,
|
| 7 |
+
"https://json-schema.org/draft/2019-09/vocab/validation": true,
|
| 8 |
+
"https://json-schema.org/draft/2019-09/vocab/meta-data": true,
|
| 9 |
+
"https://json-schema.org/draft/2019-09/vocab/format": false,
|
| 10 |
+
"https://json-schema.org/draft/2019-09/vocab/content": true
|
| 11 |
+
},
|
| 12 |
+
"$recursiveAnchor": true,
|
| 13 |
+
|
| 14 |
+
"title": "Core and Validation specifications meta-schema",
|
| 15 |
+
"allOf": [
|
| 16 |
+
{"$ref": "meta/core"},
|
| 17 |
+
{"$ref": "meta/applicator"},
|
| 18 |
+
{"$ref": "meta/validation"},
|
| 19 |
+
{"$ref": "meta/meta-data"},
|
| 20 |
+
{"$ref": "meta/format"},
|
| 21 |
+
{"$ref": "meta/content"}
|
| 22 |
+
],
|
| 23 |
+
"type": ["object", "boolean"],
|
| 24 |
+
"properties": {
|
| 25 |
+
"definitions": {
|
| 26 |
+
"$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.",
|
| 27 |
+
"type": "object",
|
| 28 |
+
"additionalProperties": { "$recursiveRef": "#" },
|
| 29 |
+
"default": {}
|
| 30 |
+
},
|
| 31 |
+
"dependencies": {
|
| 32 |
+
"$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"",
|
| 33 |
+
"type": "object",
|
| 34 |
+
"additionalProperties": {
|
| 35 |
+
"anyOf": [
|
| 36 |
+
{ "$recursiveRef": "#" },
|
| 37 |
+
{ "$ref": "meta/validation#/$defs/stringArray" }
|
| 38 |
+
]
|
| 39 |
+
}
|
| 40 |
+
}
|
| 41 |
+
}
|
| 42 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2019-09/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2019-09/meta/meta-data",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2019-09/vocab/meta-data": true
|
| 6 |
+
},
|
| 7 |
+
"$recursiveAnchor": true,
|
| 8 |
+
|
| 9 |
+
"title": "Meta-data vocabulary meta-schema",
|
| 10 |
+
|
| 11 |
+
"type": ["object", "boolean"],
|
| 12 |
+
"properties": {
|
| 13 |
+
"title": {
|
| 14 |
+
"type": "string"
|
| 15 |
+
},
|
| 16 |
+
"description": {
|
| 17 |
+
"type": "string"
|
| 18 |
+
},
|
| 19 |
+
"default": true,
|
| 20 |
+
"deprecated": {
|
| 21 |
+
"type": "boolean",
|
| 22 |
+
"default": false
|
| 23 |
+
},
|
| 24 |
+
"readOnly": {
|
| 25 |
+
"type": "boolean",
|
| 26 |
+
"default": false
|
| 27 |
+
},
|
| 28 |
+
"writeOnly": {
|
| 29 |
+
"type": "boolean",
|
| 30 |
+
"default": false
|
| 31 |
+
},
|
| 32 |
+
"examples": {
|
| 33 |
+
"type": "array",
|
| 34 |
+
"items": true
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/applicator",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/applicator": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Applicator vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"prefixItems": { "$ref": "#/$defs/schemaArray" },
|
| 13 |
+
"items": { "$dynamicRef": "#meta" },
|
| 14 |
+
"contains": { "$dynamicRef": "#meta" },
|
| 15 |
+
"additionalProperties": { "$dynamicRef": "#meta" },
|
| 16 |
+
"properties": {
|
| 17 |
+
"type": "object",
|
| 18 |
+
"additionalProperties": { "$dynamicRef": "#meta" },
|
| 19 |
+
"default": {}
|
| 20 |
+
},
|
| 21 |
+
"patternProperties": {
|
| 22 |
+
"type": "object",
|
| 23 |
+
"additionalProperties": { "$dynamicRef": "#meta" },
|
| 24 |
+
"propertyNames": { "format": "regex" },
|
| 25 |
+
"default": {}
|
| 26 |
+
},
|
| 27 |
+
"dependentSchemas": {
|
| 28 |
+
"type": "object",
|
| 29 |
+
"additionalProperties": { "$dynamicRef": "#meta" },
|
| 30 |
+
"default": {}
|
| 31 |
+
},
|
| 32 |
+
"propertyNames": { "$dynamicRef": "#meta" },
|
| 33 |
+
"if": { "$dynamicRef": "#meta" },
|
| 34 |
+
"then": { "$dynamicRef": "#meta" },
|
| 35 |
+
"else": { "$dynamicRef": "#meta" },
|
| 36 |
+
"allOf": { "$ref": "#/$defs/schemaArray" },
|
| 37 |
+
"anyOf": { "$ref": "#/$defs/schemaArray" },
|
| 38 |
+
"oneOf": { "$ref": "#/$defs/schemaArray" },
|
| 39 |
+
"not": { "$dynamicRef": "#meta" }
|
| 40 |
+
},
|
| 41 |
+
"$defs": {
|
| 42 |
+
"schemaArray": {
|
| 43 |
+
"type": "array",
|
| 44 |
+
"minItems": 1,
|
| 45 |
+
"items": { "$dynamicRef": "#meta" }
|
| 46 |
+
}
|
| 47 |
+
}
|
| 48 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/content",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/content": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Content vocabulary meta-schema",
|
| 10 |
+
|
| 11 |
+
"type": ["object", "boolean"],
|
| 12 |
+
"properties": {
|
| 13 |
+
"contentEncoding": { "type": "string" },
|
| 14 |
+
"contentMediaType": { "type": "string" },
|
| 15 |
+
"contentSchema": { "$dynamicRef": "#meta" }
|
| 16 |
+
}
|
| 17 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/core",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/core": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Core vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"$id": {
|
| 13 |
+
"$ref": "#/$defs/uriReferenceString",
|
| 14 |
+
"$comment": "Non-empty fragments not allowed.",
|
| 15 |
+
"pattern": "^[^#]*#?$"
|
| 16 |
+
},
|
| 17 |
+
"$schema": { "$ref": "#/$defs/uriString" },
|
| 18 |
+
"$ref": { "$ref": "#/$defs/uriReferenceString" },
|
| 19 |
+
"$anchor": { "$ref": "#/$defs/anchorString" },
|
| 20 |
+
"$dynamicRef": { "$ref": "#/$defs/uriReferenceString" },
|
| 21 |
+
"$dynamicAnchor": { "$ref": "#/$defs/anchorString" },
|
| 22 |
+
"$vocabulary": {
|
| 23 |
+
"type": "object",
|
| 24 |
+
"propertyNames": { "$ref": "#/$defs/uriString" },
|
| 25 |
+
"additionalProperties": {
|
| 26 |
+
"type": "boolean"
|
| 27 |
+
}
|
| 28 |
+
},
|
| 29 |
+
"$comment": {
|
| 30 |
+
"type": "string"
|
| 31 |
+
},
|
| 32 |
+
"$defs": {
|
| 33 |
+
"type": "object",
|
| 34 |
+
"additionalProperties": { "$dynamicRef": "#meta" }
|
| 35 |
+
}
|
| 36 |
+
},
|
| 37 |
+
"$defs": {
|
| 38 |
+
"anchorString": {
|
| 39 |
+
"type": "string",
|
| 40 |
+
"pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"
|
| 41 |
+
},
|
| 42 |
+
"uriString": {
|
| 43 |
+
"type": "string",
|
| 44 |
+
"format": "uri"
|
| 45 |
+
},
|
| 46 |
+
"uriReferenceString": {
|
| 47 |
+
"type": "string",
|
| 48 |
+
"format": "uri-reference"
|
| 49 |
+
}
|
| 50 |
+
}
|
| 51 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2019-09/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2019-09/meta/format",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2019-09/vocab/format": true
|
| 6 |
+
},
|
| 7 |
+
"$recursiveAnchor": true,
|
| 8 |
+
|
| 9 |
+
"title": "Format vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"format": { "type": "string" }
|
| 13 |
+
}
|
| 14 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/format-assertion",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/format-assertion": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Format vocabulary meta-schema for assertion results",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"format": { "type": "string" }
|
| 13 |
+
}
|
| 14 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/meta-data",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/meta-data": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Meta-data vocabulary meta-schema",
|
| 10 |
+
|
| 11 |
+
"type": ["object", "boolean"],
|
| 12 |
+
"properties": {
|
| 13 |
+
"title": {
|
| 14 |
+
"type": "string"
|
| 15 |
+
},
|
| 16 |
+
"description": {
|
| 17 |
+
"type": "string"
|
| 18 |
+
},
|
| 19 |
+
"default": true,
|
| 20 |
+
"deprecated": {
|
| 21 |
+
"type": "boolean",
|
| 22 |
+
"default": false
|
| 23 |
+
},
|
| 24 |
+
"readOnly": {
|
| 25 |
+
"type": "boolean",
|
| 26 |
+
"default": false
|
| 27 |
+
},
|
| 28 |
+
"writeOnly": {
|
| 29 |
+
"type": "boolean",
|
| 30 |
+
"default": false
|
| 31 |
+
},
|
| 32 |
+
"examples": {
|
| 33 |
+
"type": "array",
|
| 34 |
+
"items": true
|
| 35 |
+
}
|
| 36 |
+
}
|
| 37 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 3 |
+
"$id": "https://json-schema.org/draft/2020-12/meta/validation",
|
| 4 |
+
"$vocabulary": {
|
| 5 |
+
"https://json-schema.org/draft/2020-12/vocab/validation": true
|
| 6 |
+
},
|
| 7 |
+
"$dynamicAnchor": "meta",
|
| 8 |
+
|
| 9 |
+
"title": "Validation vocabulary meta-schema",
|
| 10 |
+
"type": ["object", "boolean"],
|
| 11 |
+
"properties": {
|
| 12 |
+
"type": {
|
| 13 |
+
"anyOf": [
|
| 14 |
+
{ "$ref": "#/$defs/simpleTypes" },
|
| 15 |
+
{
|
| 16 |
+
"type": "array",
|
| 17 |
+
"items": { "$ref": "#/$defs/simpleTypes" },
|
| 18 |
+
"minItems": 1,
|
| 19 |
+
"uniqueItems": true
|
| 20 |
+
}
|
| 21 |
+
]
|
| 22 |
+
},
|
| 23 |
+
"const": true,
|
| 24 |
+
"enum": {
|
| 25 |
+
"type": "array",
|
| 26 |
+
"items": true
|
| 27 |
+
},
|
| 28 |
+
"multipleOf": {
|
| 29 |
+
"type": "number",
|
| 30 |
+
"exclusiveMinimum": 0
|
| 31 |
+
},
|
| 32 |
+
"maximum": {
|
| 33 |
+
"type": "number"
|
| 34 |
+
},
|
| 35 |
+
"exclusiveMaximum": {
|
| 36 |
+
"type": "number"
|
| 37 |
+
},
|
| 38 |
+
"minimum": {
|
| 39 |
+
"type": "number"
|
| 40 |
+
},
|
| 41 |
+
"exclusiveMinimum": {
|
| 42 |
+
"type": "number"
|
| 43 |
+
},
|
| 44 |
+
"maxLength": { "$ref": "#/$defs/nonNegativeInteger" },
|
| 45 |
+
"minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
|
| 46 |
+
"pattern": {
|
| 47 |
+
"type": "string",
|
| 48 |
+
"format": "regex"
|
| 49 |
+
},
|
| 50 |
+
"maxItems": { "$ref": "#/$defs/nonNegativeInteger" },
|
| 51 |
+
"minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
|
| 52 |
+
"uniqueItems": {
|
| 53 |
+
"type": "boolean",
|
| 54 |
+
"default": false
|
| 55 |
+
},
|
| 56 |
+
"maxContains": { "$ref": "#/$defs/nonNegativeInteger" },
|
| 57 |
+
"minContains": {
|
| 58 |
+
"$ref": "#/$defs/nonNegativeInteger",
|
| 59 |
+
"default": 1
|
| 60 |
+
},
|
| 61 |
+
"maxProperties": { "$ref": "#/$defs/nonNegativeInteger" },
|
| 62 |
+
"minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
|
| 63 |
+
"required": { "$ref": "#/$defs/stringArray" },
|
| 64 |
+
"dependentRequired": {
|
| 65 |
+
"type": "object",
|
| 66 |
+
"additionalProperties": {
|
| 67 |
+
"$ref": "#/$defs/stringArray"
|
| 68 |
+
}
|
| 69 |
+
}
|
| 70 |
+
},
|
| 71 |
+
"$defs": {
|
| 72 |
+
"nonNegativeInteger": {
|
| 73 |
+
"type": "integer",
|
| 74 |
+
"minimum": 0
|
| 75 |
+
},
|
| 76 |
+
"nonNegativeIntegerDefault0": {
|
| 77 |
+
"$ref": "#/$defs/nonNegativeInteger",
|
| 78 |
+
"default": 0
|
| 79 |
+
},
|
| 80 |
+
"simpleTypes": {
|
| 81 |
+
"enum": [
|
| 82 |
+
"array",
|
| 83 |
+
"boolean",
|
| 84 |
+
"integer",
|
| 85 |
+
"null",
|
| 86 |
+
"number",
|
| 87 |
+
"object",
|
| 88 |
+
"string"
|
| 89 |
+
]
|
| 90 |
+
},
|
| 91 |
+
"stringArray": {
|
| 92 |
+
"type": "array",
|
| 93 |
+
"items": { "type": "string" },
|
| 94 |
+
"uniqueItems": true,
|
| 95 |
+
"default": []
|
| 96 |
+
}
|
| 97 |
+
}
|
| 98 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema" : "http://json-schema.org/draft-03/schema#",
|
| 3 |
+
"id" : "http://json-schema.org/draft-03/schema#",
|
| 4 |
+
"type" : "object",
|
| 5 |
+
|
| 6 |
+
"properties" : {
|
| 7 |
+
"type" : {
|
| 8 |
+
"type" : ["string", "array"],
|
| 9 |
+
"items" : {
|
| 10 |
+
"type" : ["string", {"$ref" : "#"}]
|
| 11 |
+
},
|
| 12 |
+
"uniqueItems" : true,
|
| 13 |
+
"default" : "any"
|
| 14 |
+
},
|
| 15 |
+
|
| 16 |
+
"properties" : {
|
| 17 |
+
"type" : "object",
|
| 18 |
+
"additionalProperties" : {"$ref" : "#"},
|
| 19 |
+
"default" : {}
|
| 20 |
+
},
|
| 21 |
+
|
| 22 |
+
"patternProperties" : {
|
| 23 |
+
"type" : "object",
|
| 24 |
+
"additionalProperties" : {"$ref" : "#"},
|
| 25 |
+
"default" : {}
|
| 26 |
+
},
|
| 27 |
+
|
| 28 |
+
"additionalProperties" : {
|
| 29 |
+
"type" : [{"$ref" : "#"}, "boolean"],
|
| 30 |
+
"default" : {}
|
| 31 |
+
},
|
| 32 |
+
|
| 33 |
+
"items" : {
|
| 34 |
+
"type" : [{"$ref" : "#"}, "array"],
|
| 35 |
+
"items" : {"$ref" : "#"},
|
| 36 |
+
"default" : {}
|
| 37 |
+
},
|
| 38 |
+
|
| 39 |
+
"additionalItems" : {
|
| 40 |
+
"type" : [{"$ref" : "#"}, "boolean"],
|
| 41 |
+
"default" : {}
|
| 42 |
+
},
|
| 43 |
+
|
| 44 |
+
"required" : {
|
| 45 |
+
"type" : "boolean",
|
| 46 |
+
"default" : false
|
| 47 |
+
},
|
| 48 |
+
|
| 49 |
+
"dependencies" : {
|
| 50 |
+
"type" : "object",
|
| 51 |
+
"additionalProperties" : {
|
| 52 |
+
"type" : ["string", "array", {"$ref" : "#"}],
|
| 53 |
+
"items" : {
|
| 54 |
+
"type" : "string"
|
| 55 |
+
}
|
| 56 |
+
},
|
| 57 |
+
"default" : {}
|
| 58 |
+
},
|
| 59 |
+
|
| 60 |
+
"minimum" : {
|
| 61 |
+
"type" : "number"
|
| 62 |
+
},
|
| 63 |
+
|
| 64 |
+
"maximum" : {
|
| 65 |
+
"type" : "number"
|
| 66 |
+
},
|
| 67 |
+
|
| 68 |
+
"exclusiveMinimum" : {
|
| 69 |
+
"type" : "boolean",
|
| 70 |
+
"default" : false
|
| 71 |
+
},
|
| 72 |
+
|
| 73 |
+
"exclusiveMaximum" : {
|
| 74 |
+
"type" : "boolean",
|
| 75 |
+
"default" : false
|
| 76 |
+
},
|
| 77 |
+
|
| 78 |
+
"minItems" : {
|
| 79 |
+
"type" : "integer",
|
| 80 |
+
"minimum" : 0,
|
| 81 |
+
"default" : 0
|
| 82 |
+
},
|
| 83 |
+
|
| 84 |
+
"maxItems" : {
|
| 85 |
+
"type" : "integer",
|
| 86 |
+
"minimum" : 0
|
| 87 |
+
},
|
| 88 |
+
|
| 89 |
+
"uniqueItems" : {
|
| 90 |
+
"type" : "boolean",
|
| 91 |
+
"default" : false
|
| 92 |
+
},
|
| 93 |
+
|
| 94 |
+
"pattern" : {
|
| 95 |
+
"type" : "string",
|
| 96 |
+
"format" : "regex"
|
| 97 |
+
},
|
| 98 |
+
|
| 99 |
+
"minLength" : {
|
| 100 |
+
"type" : "integer",
|
| 101 |
+
"minimum" : 0,
|
| 102 |
+
"default" : 0
|
| 103 |
+
},
|
| 104 |
+
|
| 105 |
+
"maxLength" : {
|
| 106 |
+
"type" : "integer"
|
| 107 |
+
},
|
| 108 |
+
|
| 109 |
+
"enum" : {
|
| 110 |
+
"type" : "array",
|
| 111 |
+
"minItems" : 1,
|
| 112 |
+
"uniqueItems" : true
|
| 113 |
+
},
|
| 114 |
+
|
| 115 |
+
"default" : {
|
| 116 |
+
"type" : "any"
|
| 117 |
+
},
|
| 118 |
+
|
| 119 |
+
"title" : {
|
| 120 |
+
"type" : "string"
|
| 121 |
+
},
|
| 122 |
+
|
| 123 |
+
"description" : {
|
| 124 |
+
"type" : "string"
|
| 125 |
+
},
|
| 126 |
+
|
| 127 |
+
"format" : {
|
| 128 |
+
"type" : "string"
|
| 129 |
+
},
|
| 130 |
+
|
| 131 |
+
"divisibleBy" : {
|
| 132 |
+
"type" : "number",
|
| 133 |
+
"minimum" : 0,
|
| 134 |
+
"exclusiveMinimum" : true,
|
| 135 |
+
"default" : 1
|
| 136 |
+
},
|
| 137 |
+
|
| 138 |
+
"disallow" : {
|
| 139 |
+
"type" : ["string", "array"],
|
| 140 |
+
"items" : {
|
| 141 |
+
"type" : ["string", {"$ref" : "#"}]
|
| 142 |
+
},
|
| 143 |
+
"uniqueItems" : true
|
| 144 |
+
},
|
| 145 |
+
|
| 146 |
+
"extends" : {
|
| 147 |
+
"type" : [{"$ref" : "#"}, "array"],
|
| 148 |
+
"items" : {"$ref" : "#"},
|
| 149 |
+
"default" : {}
|
| 150 |
+
},
|
| 151 |
+
|
| 152 |
+
"id" : {
|
| 153 |
+
"type" : "string"
|
| 154 |
+
},
|
| 155 |
+
|
| 156 |
+
"$ref" : {
|
| 157 |
+
"type" : "string"
|
| 158 |
+
},
|
| 159 |
+
|
| 160 |
+
"$schema" : {
|
| 161 |
+
"type" : "string",
|
| 162 |
+
"format" : "uri"
|
| 163 |
+
}
|
| 164 |
+
},
|
| 165 |
+
|
| 166 |
+
"dependencies" : {
|
| 167 |
+
"exclusiveMinimum" : "minimum",
|
| 168 |
+
"exclusiveMaximum" : "maximum"
|
| 169 |
+
},
|
| 170 |
+
|
| 171 |
+
"default" : {}
|
| 172 |
+
}
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/METADATA
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: torchvision
|
| 3 |
+
Version: 0.17.0+cu118
|
| 4 |
+
Summary: image and video datasets and models for torch deep learning
|
| 5 |
+
Home-page: https://github.com/pytorch/vision
|
| 6 |
+
Author: PyTorch Core Team
|
| 7 |
+
Author-email: soumith@pytorch.org
|
| 8 |
+
License: BSD
|
| 9 |
+
Requires-Python: >=3.8
|
| 10 |
+
Description-Content-Type: text/markdown
|
| 11 |
+
License-File: LICENSE
|
| 12 |
+
Requires-Dist: numpy
|
| 13 |
+
Requires-Dist: requests
|
| 14 |
+
Requires-Dist: torch (==2.2.0)
|
| 15 |
+
Requires-Dist: pillow (!=8.3.*,>=5.3.0)
|
| 16 |
+
Provides-Extra: scipy
|
| 17 |
+
Requires-Dist: scipy ; extra == 'scipy'
|
| 18 |
+
|
| 19 |
+
# torchvision
|
| 20 |
+
|
| 21 |
+
[](https://pepy.tech/project/torchvision)
|
| 22 |
+
[](https://pytorch.org/vision/stable/index.html)
|
| 23 |
+
|
| 24 |
+
The torchvision package consists of popular datasets, model architectures, and common image transformations for computer
|
| 25 |
+
vision.
|
| 26 |
+
|
| 27 |
+
## Installation
|
| 28 |
+
|
| 29 |
+
Please refer to the [official
|
| 30 |
+
instructions](https://pytorch.org/get-started/locally/) to install the stable
|
| 31 |
+
versions of `torch` and `torchvision` on your system.
|
| 32 |
+
|
| 33 |
+
To build source, refer to our [contributing
|
| 34 |
+
page](https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md#development-installation).
|
| 35 |
+
|
| 36 |
+
The following is the corresponding `torchvision` versions and supported Python
|
| 37 |
+
versions.
|
| 38 |
+
|
| 39 |
+
| `torch` | `torchvision` | Python |
|
| 40 |
+
| ------------------ | ------------------ | ------------------- |
|
| 41 |
+
| `main` / `nightly` | `main` / `nightly` | `>=3.8`, `<=3.11` |
|
| 42 |
+
| `2.1` | `0.16` | `>=3.8`, `<=3.11` |
|
| 43 |
+
| `2.0` | `0.15` | `>=3.8`, `<=3.11` |
|
| 44 |
+
| `1.13` | `0.14` | `>=3.7.2`, `<=3.10` |
|
| 45 |
+
|
| 46 |
+
<details>
|
| 47 |
+
<summary>older versions</summary>
|
| 48 |
+
|
| 49 |
+
| `torch` | `torchvision` | Python |
|
| 50 |
+
|---------|-------------------|---------------------------|
|
| 51 |
+
| `1.12` | `0.13` | `>=3.7`, `<=3.10` |
|
| 52 |
+
| `1.11` | `0.12` | `>=3.7`, `<=3.10` |
|
| 53 |
+
| `1.10` | `0.11` | `>=3.6`, `<=3.9` |
|
| 54 |
+
| `1.9` | `0.10` | `>=3.6`, `<=3.9` |
|
| 55 |
+
| `1.8` | `0.9` | `>=3.6`, `<=3.9` |
|
| 56 |
+
| `1.7` | `0.8` | `>=3.6`, `<=3.9` |
|
| 57 |
+
| `1.6` | `0.7` | `>=3.6`, `<=3.8` |
|
| 58 |
+
| `1.5` | `0.6` | `>=3.5`, `<=3.8` |
|
| 59 |
+
| `1.4` | `0.5` | `==2.7`, `>=3.5`, `<=3.8` |
|
| 60 |
+
| `1.3` | `0.4.2` / `0.4.3` | `==2.7`, `>=3.5`, `<=3.7` |
|
| 61 |
+
| `1.2` | `0.4.1` | `==2.7`, `>=3.5`, `<=3.7` |
|
| 62 |
+
| `1.1` | `0.3` | `==2.7`, `>=3.5`, `<=3.7` |
|
| 63 |
+
| `<=1.0` | `0.2` | `==2.7`, `>=3.5`, `<=3.7` |
|
| 64 |
+
|
| 65 |
+
</details>
|
| 66 |
+
|
| 67 |
+
## Image Backends
|
| 68 |
+
|
| 69 |
+
Torchvision currently supports the following image backends:
|
| 70 |
+
|
| 71 |
+
- torch tensors
|
| 72 |
+
- PIL images:
|
| 73 |
+
- [Pillow](https://python-pillow.org/)
|
| 74 |
+
- [Pillow-SIMD](https://github.com/uploadcare/pillow-simd) - a **much faster** drop-in replacement for Pillow with SIMD.
|
| 75 |
+
|
| 76 |
+
Read more in in our [docs](https://pytorch.org/vision/stable/transforms.html).
|
| 77 |
+
|
| 78 |
+
## [UNSTABLE] Video Backend
|
| 79 |
+
|
| 80 |
+
Torchvision currently supports the following video backends:
|
| 81 |
+
|
| 82 |
+
- [pyav](https://github.com/PyAV-Org/PyAV) (default) - Pythonic binding for ffmpeg libraries.
|
| 83 |
+
- video_reader - This needs ffmpeg to be installed and torchvision to be built from source. There shouldn't be any
|
| 84 |
+
conflicting version of ffmpeg installed. Currently, this is only supported on Linux.
|
| 85 |
+
|
| 86 |
+
```
|
| 87 |
+
conda install -c conda-forge 'ffmpeg<4.3'
|
| 88 |
+
python setup.py install
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
# Using the models on C++
|
| 92 |
+
|
| 93 |
+
TorchVision provides an example project for how to use the models on C++ using JIT Script.
|
| 94 |
+
|
| 95 |
+
Installation From source:
|
| 96 |
+
|
| 97 |
+
```
|
| 98 |
+
mkdir build
|
| 99 |
+
cd build
|
| 100 |
+
# Add -DWITH_CUDA=on support for the CUDA if needed
|
| 101 |
+
cmake ..
|
| 102 |
+
make
|
| 103 |
+
make install
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
Once installed, the library can be accessed in cmake (after properly configuring `CMAKE_PREFIX_PATH`) via the
|
| 107 |
+
`TorchVision::TorchVision` target:
|
| 108 |
+
|
| 109 |
+
```
|
| 110 |
+
find_package(TorchVision REQUIRED)
|
| 111 |
+
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
The `TorchVision` package will also automatically look for the `Torch` package and add it as a dependency to
|
| 115 |
+
`my-target`, so make sure that it is also available to cmake via the `CMAKE_PREFIX_PATH`.
|
| 116 |
+
|
| 117 |
+
For an example setup, take a look at `examples/cpp/hello_world`.
|
| 118 |
+
|
| 119 |
+
Python linking is disabled by default when compiling TorchVision with CMake, this allows you to run models without any
|
| 120 |
+
Python dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link
|
| 121 |
+
to Python. This can be done by passing `-DUSE_PYTHON=on` to CMake.
|
| 122 |
+
|
| 123 |
+
### TorchVision Operators
|
| 124 |
+
|
| 125 |
+
In order to get the torchvision operators registered with torch (eg. for the JIT), all you need to do is to ensure that
|
| 126 |
+
you `#include <torchvision/vision.h>` in your project.
|
| 127 |
+
|
| 128 |
+
## Documentation
|
| 129 |
+
|
| 130 |
+
You can find the API documentation on the pytorch website: <https://pytorch.org/vision/stable/index.html>
|
| 131 |
+
|
| 132 |
+
## Contributing
|
| 133 |
+
|
| 134 |
+
See the [CONTRIBUTING](CONTRIBUTING.md) file for how to help out.
|
| 135 |
+
|
| 136 |
+
## Disclaimer on Datasets
|
| 137 |
+
|
| 138 |
+
This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets,
|
| 139 |
+
vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to
|
| 140 |
+
determine whether you have permission to use the dataset under the dataset's license.
|
| 141 |
+
|
| 142 |
+
If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset
|
| 143 |
+
to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML
|
| 144 |
+
community!
|
| 145 |
+
|
| 146 |
+
## Pre-trained Model License
|
| 147 |
+
|
| 148 |
+
The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the
|
| 149 |
+
dataset used for training. It is your responsibility to determine whether you have permission to use the models for your
|
| 150 |
+
use case.
|
| 151 |
+
|
| 152 |
+
More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See
|
| 153 |
+
[SWAG LICENSE](https://github.com/facebookresearch/SWAG/blob/main/LICENSE) for additional details.
|
| 154 |
+
|
| 155 |
+
## Citing TorchVision
|
| 156 |
+
|
| 157 |
+
If you find TorchVision useful in your work, please consider citing the following BibTeX entry:
|
| 158 |
+
|
| 159 |
+
```bibtex
|
| 160 |
+
@software{torchvision2016,
|
| 161 |
+
title = {TorchVision: PyTorch's Computer Vision library},
|
| 162 |
+
author = {TorchVision maintainers and contributors},
|
| 163 |
+
year = 2016,
|
| 164 |
+
journal = {GitHub repository},
|
| 165 |
+
publisher = {GitHub},
|
| 166 |
+
howpublished = {\url{https://github.com/pytorch/vision}}
|
| 167 |
+
}
|
| 168 |
+
```
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/RECORD
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torchvision-0.17.0+cu118.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
torchvision-0.17.0+cu118.dist-info/LICENSE,sha256=ZQL2doUc_iX4r3VTHfsyN1tzJbc8N-e0N0H6QiiT5x0,1517
|
| 3 |
+
torchvision-0.17.0+cu118.dist-info/METADATA,sha256=eJ616lX3J6fOVY7ZQPuy5J9C1WMD6sBldzI0DyztITg,6583
|
| 4 |
+
torchvision-0.17.0+cu118.dist-info/RECORD,,
|
| 5 |
+
torchvision-0.17.0+cu118.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
torchvision-0.17.0+cu118.dist-info/WHEEL,sha256=nTy_Z8ivGEB6qFwVLg7_h6rq0Jt0JU5Pz2cL2_FjSMQ,105
|
| 7 |
+
torchvision-0.17.0+cu118.dist-info/top_level.txt,sha256=ucJZoaluBW9BGYT4TuCE6zoZY_JuSP30wbDh-IRpxUU,12
|
| 8 |
+
torchvision.libs/libcudart.60cfec8e.so.11.0,sha256=0NpBrhMjz07rYQEj1p13FBJM_l6_zE5F8CuRDlHFfuY,679264
|
| 9 |
+
torchvision.libs/libjpeg.ceea7512.so.62,sha256=Q0Nt1U7kvyOPOH37o9EyH96wBEFcgH1NNJDDaL1eXew,285328
|
| 10 |
+
torchvision.libs/libnvjpeg.70530407.so.11,sha256=YnSLGGxv2PIM8xw9oWBNAjbGADpqJG1LdhR2Y6yNiGI,5690112
|
| 11 |
+
torchvision.libs/libpng16.7f72a3c5.so.16,sha256=YCpAWFa9tnowZ95WDNov8ovQCqdDlX83YfpY9iYxJss,1079081
|
| 12 |
+
torchvision.libs/libz.37eba27a.so.1,sha256=Cw5oKp3H_UiVpngyiPhRt5PciWM_KHFAJ5dPpNZvORQ,124744
|
| 13 |
+
torchvision/_C.so,sha256=8eeAsCBfwmWb9H9dUwSaV4AX0K11p1MWadpc-ts-n1U,7628736
|
| 14 |
+
torchvision/__init__.py,sha256=3AOjL7NqkSGNquYbrRUSIVXwC-8kBp-BZmhMspcnQ_c,3368
|
| 15 |
+
torchvision/__pycache__/__init__.cpython-310.pyc,,
|
| 16 |
+
torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc,,
|
| 17 |
+
torchvision/__pycache__/_meta_registrations.cpython-310.pyc,,
|
| 18 |
+
torchvision/__pycache__/_utils.cpython-310.pyc,,
|
| 19 |
+
torchvision/__pycache__/extension.cpython-310.pyc,,
|
| 20 |
+
torchvision/__pycache__/utils.cpython-310.pyc,,
|
| 21 |
+
torchvision/__pycache__/version.cpython-310.pyc,,
|
| 22 |
+
torchvision/_internally_replaced_utils.py,sha256=67zSUHOn6JwdnMUQchHgpNLCtWQQ9dJFpV_OUn8Qb_w,1389
|
| 23 |
+
torchvision/_meta_registrations.py,sha256=Rp994EiqKt201-HFhRmJyIn6bJxwOxUwEyXIRG5GbjI,7212
|
| 24 |
+
torchvision/_utils.py,sha256=6TWK0JGaZVQrofgCAp5ox61_NQE2gIwhYouKQMiTaJ8,934
|
| 25 |
+
torchvision/datasets/__init__.py,sha256=AHSoX8LkWIt7RGlJDmk64pDvmWq6GCh-D7XwE2l382A,3587
|
| 26 |
+
torchvision/datasets/__pycache__/__init__.cpython-310.pyc,,
|
| 27 |
+
torchvision/datasets/__pycache__/_optical_flow.cpython-310.pyc,,
|
| 28 |
+
torchvision/datasets/__pycache__/_stereo_matching.cpython-310.pyc,,
|
| 29 |
+
torchvision/datasets/__pycache__/caltech.cpython-310.pyc,,
|
| 30 |
+
torchvision/datasets/__pycache__/celeba.cpython-310.pyc,,
|
| 31 |
+
torchvision/datasets/__pycache__/cifar.cpython-310.pyc,,
|
| 32 |
+
torchvision/datasets/__pycache__/cityscapes.cpython-310.pyc,,
|
| 33 |
+
torchvision/datasets/__pycache__/clevr.cpython-310.pyc,,
|
| 34 |
+
torchvision/datasets/__pycache__/coco.cpython-310.pyc,,
|
| 35 |
+
torchvision/datasets/__pycache__/country211.cpython-310.pyc,,
|
| 36 |
+
torchvision/datasets/__pycache__/dtd.cpython-310.pyc,,
|
| 37 |
+
torchvision/datasets/__pycache__/eurosat.cpython-310.pyc,,
|
| 38 |
+
torchvision/datasets/__pycache__/fakedata.cpython-310.pyc,,
|
| 39 |
+
torchvision/datasets/__pycache__/fer2013.cpython-310.pyc,,
|
| 40 |
+
torchvision/datasets/__pycache__/fgvc_aircraft.cpython-310.pyc,,
|
| 41 |
+
torchvision/datasets/__pycache__/flickr.cpython-310.pyc,,
|
| 42 |
+
torchvision/datasets/__pycache__/flowers102.cpython-310.pyc,,
|
| 43 |
+
torchvision/datasets/__pycache__/folder.cpython-310.pyc,,
|
| 44 |
+
torchvision/datasets/__pycache__/food101.cpython-310.pyc,,
|
| 45 |
+
torchvision/datasets/__pycache__/gtsrb.cpython-310.pyc,,
|
| 46 |
+
torchvision/datasets/__pycache__/hmdb51.cpython-310.pyc,,
|
| 47 |
+
torchvision/datasets/__pycache__/imagenet.cpython-310.pyc,,
|
| 48 |
+
torchvision/datasets/__pycache__/imagenette.cpython-310.pyc,,
|
| 49 |
+
torchvision/datasets/__pycache__/inaturalist.cpython-310.pyc,,
|
| 50 |
+
torchvision/datasets/__pycache__/kinetics.cpython-310.pyc,,
|
| 51 |
+
torchvision/datasets/__pycache__/kitti.cpython-310.pyc,,
|
| 52 |
+
torchvision/datasets/__pycache__/lfw.cpython-310.pyc,,
|
| 53 |
+
torchvision/datasets/__pycache__/lsun.cpython-310.pyc,,
|
| 54 |
+
torchvision/datasets/__pycache__/mnist.cpython-310.pyc,,
|
| 55 |
+
torchvision/datasets/__pycache__/moving_mnist.cpython-310.pyc,,
|
| 56 |
+
torchvision/datasets/__pycache__/omniglot.cpython-310.pyc,,
|
| 57 |
+
torchvision/datasets/__pycache__/oxford_iiit_pet.cpython-310.pyc,,
|
| 58 |
+
torchvision/datasets/__pycache__/pcam.cpython-310.pyc,,
|
| 59 |
+
torchvision/datasets/__pycache__/phototour.cpython-310.pyc,,
|
| 60 |
+
torchvision/datasets/__pycache__/places365.cpython-310.pyc,,
|
| 61 |
+
torchvision/datasets/__pycache__/rendered_sst2.cpython-310.pyc,,
|
| 62 |
+
torchvision/datasets/__pycache__/sbd.cpython-310.pyc,,
|
| 63 |
+
torchvision/datasets/__pycache__/sbu.cpython-310.pyc,,
|
| 64 |
+
torchvision/datasets/__pycache__/semeion.cpython-310.pyc,,
|
| 65 |
+
torchvision/datasets/__pycache__/stanford_cars.cpython-310.pyc,,
|
| 66 |
+
torchvision/datasets/__pycache__/stl10.cpython-310.pyc,,
|
| 67 |
+
torchvision/datasets/__pycache__/sun397.cpython-310.pyc,,
|
| 68 |
+
torchvision/datasets/__pycache__/svhn.cpython-310.pyc,,
|
| 69 |
+
torchvision/datasets/__pycache__/ucf101.cpython-310.pyc,,
|
| 70 |
+
torchvision/datasets/__pycache__/usps.cpython-310.pyc,,
|
| 71 |
+
torchvision/datasets/__pycache__/utils.cpython-310.pyc,,
|
| 72 |
+
torchvision/datasets/__pycache__/video_utils.cpython-310.pyc,,
|
| 73 |
+
torchvision/datasets/__pycache__/vision.cpython-310.pyc,,
|
| 74 |
+
torchvision/datasets/__pycache__/voc.cpython-310.pyc,,
|
| 75 |
+
torchvision/datasets/__pycache__/widerface.cpython-310.pyc,,
|
| 76 |
+
torchvision/datasets/_optical_flow.py,sha256=UeB-0XfjtpTH6bCLYXm21RxscJ3Dyw8urQbIqb0vg_4,19489
|
| 77 |
+
torchvision/datasets/_stereo_matching.py,sha256=48CZpvJTW3hf2rPKbjs2HJKnOWuWwAYlmcqWcUpltgc,48774
|
| 78 |
+
torchvision/datasets/caltech.py,sha256=YiQMS2pnG4kFT3KyU7YufWJZrgd-0g65LvukRW7Sf60,8738
|
| 79 |
+
torchvision/datasets/celeba.py,sha256=ndPKsAdyKj1GAe8Y9Mz-7fIWi7WZwKsewnm9x42dqcU,8292
|
| 80 |
+
torchvision/datasets/cifar.py,sha256=TqdSHJ9UBtk0nVvnvlILIKgoGP-GjgbYCqjWhjCvf6I,5789
|
| 81 |
+
torchvision/datasets/cityscapes.py,sha256=zRAZzk80EmgQlTK2swfOnYwV-fIXGuQy8JHq1LL-vyo,10238
|
| 82 |
+
torchvision/datasets/clevr.py,sha256=jLqbFP5rfn_1mjP5Dc7eSnQKHVHf-TLCpR9aGmbrfuE,3416
|
| 83 |
+
torchvision/datasets/coco.py,sha256=8IrwZcvTvfHHatZwEUd8u1dZJHlXRLFqOegKXFO-nJw,3972
|
| 84 |
+
torchvision/datasets/country211.py,sha256=NbS59MbCQdy7jbwR4af3sENKEnOGbXoUvMuqxvr1Mzo,2401
|
| 85 |
+
torchvision/datasets/dtd.py,sha256=EfOGSCKAD2Cw6MmqiB9tdiVDzov6fqlke5KDa18_oWA,3975
|
| 86 |
+
torchvision/datasets/eurosat.py,sha256=cf0RE6i5ngu9cuKFj14-okg7bmLt4VC-5v1fRVd0qtg,2053
|
| 87 |
+
torchvision/datasets/fakedata.py,sha256=stJN5-VLGZDh7TNhGVoHFqvkhEoVU94m7IbrQe7z1-Y,2449
|
| 88 |
+
torchvision/datasets/fer2013.py,sha256=7cYCpTOji2T__c-JaczOu4_BfvH_wZmTYke-2JHWq_o,2762
|
| 89 |
+
torchvision/datasets/fgvc_aircraft.py,sha256=FEbGNLlTJKGx5jQeh-o-IamApP7ydIqR-nmh-1NCXp0,4566
|
| 90 |
+
torchvision/datasets/flickr.py,sha256=7u7IKEc-KawRlKV2QKdrbG-XCETR7qwF5uTfp7vNOkQ,5339
|
| 91 |
+
torchvision/datasets/flowers102.py,sha256=-EpegZlOcVhjt6mNbGMhl25ydxNU5Xx3zt6poUwh9Zw,4605
|
| 92 |
+
torchvision/datasets/folder.py,sha256=kyJs1XgN3NxbyG15asCc7NBVH-vSXZ02JaraPfumd4g,11910
|
| 93 |
+
torchvision/datasets/food101.py,sha256=b7kXyXrUSJkKVPXyA9q-iRH69-XntXPaSlut0YK0eC8,3717
|
| 94 |
+
torchvision/datasets/gtsrb.py,sha256=N17Rq6IQX4oPEAtg-VNqxv789IhSrFKGWmfWAuej1UQ,3742
|
| 95 |
+
torchvision/datasets/hmdb51.py,sha256=m9sxwdA67NNG2dt86iWwzJZG2QWulNun_eWmcM1tUUE,5909
|
| 96 |
+
torchvision/datasets/imagenet.py,sha256=Qbcq6iwyjx-tsEx_gidWBPMDX9e_2cIBSzOknuqpENo,8487
|
| 97 |
+
torchvision/datasets/imagenette.py,sha256=1D7qXMynuTzcgboZw2tSGH1rQchR94TjKmJgXOTx7WQ,4421
|
| 98 |
+
torchvision/datasets/inaturalist.py,sha256=QR9SDFtBg7a-2w9Muo_8ywJ87Q_Ym8uYaPYnro8Hnys,10107
|
| 99 |
+
torchvision/datasets/kinetics.py,sha256=Lexhk6CfVOF_XKJSLXIfgvNgSHyLMeZj45KPNIgaWpY,10329
|
| 100 |
+
torchvision/datasets/kitti.py,sha256=XYYf00fHIeW6QXcubfoLq-Ewf7P4fPrS2Vag6WVngLQ,5575
|
| 101 |
+
torchvision/datasets/lfw.py,sha256=6h2y3a094PjFdn_DJX6WGyipiDk_ALk3-ed1wVTl4Yg,10491
|
| 102 |
+
torchvision/datasets/lsun.py,sha256=5WOpSAtfzZ3FcuraDHF1SEmhs8M3JUMqrLBCvWJJsWk,5675
|
| 103 |
+
torchvision/datasets/mnist.py,sha256=_RgTVGD6ngRAJkhWxeQTVpQfBTB8UAecvaHnK-3SHhU,21554
|
| 104 |
+
torchvision/datasets/moving_mnist.py,sha256=PI5X5JAdYZKpv9yF-Dculrd8C_Q8CbYELWj9JyqqL3Q,3585
|
| 105 |
+
torchvision/datasets/omniglot.py,sha256=2nQW1nrmsyXeaXSPfPyTLWIhiMm6rZWKpDfdT8n817M,4091
|
| 106 |
+
torchvision/datasets/oxford_iiit_pet.py,sha256=4DCp7HHkx7Jw6uF9iiAQ0zXqfSBO8JnbCPoECfpwok4,5053
|
| 107 |
+
torchvision/datasets/pcam.py,sha256=0TJysveXU2wm8yvGDc-hdUZJGby9caGTYvh197lDXfo,5115
|
| 108 |
+
torchvision/datasets/phototour.py,sha256=0bLBhwI5lD5fmXyEtKFW0WTC3XQ2lRm433-8c0Y_ZUA,7924
|
| 109 |
+
torchvision/datasets/places365.py,sha256=oDbW8TCnEC9yBGg8AR-wuU6YY2iDxWcGiwImLyUXC8Q,7199
|
| 110 |
+
torchvision/datasets/rendered_sst2.py,sha256=N5dLI8VgnAsmqgXLoRPDGKGcXggP-3EcEtNP_qenw0I,3562
|
| 111 |
+
torchvision/datasets/samplers/__init__.py,sha256=W1ZtQpGLG6aoHylo1t8PEsHIVoWwso5bSFk9JzKfH8g,161
|
| 112 |
+
torchvision/datasets/samplers/__pycache__/__init__.cpython-310.pyc,,
|
| 113 |
+
torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-310.pyc,,
|
| 114 |
+
torchvision/datasets/samplers/clip_sampler.py,sha256=1-k3bxToGpBlqC4-iyVDggtojA701NflW0nBRLK27tQ,6244
|
| 115 |
+
torchvision/datasets/sbd.py,sha256=4-iQW-j7nhK0FEbwFg2sonyc9TzvUQ20dRezJT2_lyY,5202
|
| 116 |
+
torchvision/datasets/sbu.py,sha256=L2axgbql5zryhzmYhaLKXfBlcttIewukYTJQL1C_LUc,4081
|
| 117 |
+
torchvision/datasets/semeion.py,sha256=scDtolOsTEqe-wyNocqsTVBX4J6Q4G_7PvF8PkIgI3A,3088
|
| 118 |
+
torchvision/datasets/stanford_cars.py,sha256=G2l_QoWfTi_ZiUQDQKZ45KnYBi5m43ie8bExTGUTdvI,4843
|
| 119 |
+
torchvision/datasets/stl10.py,sha256=sbrOzrkiCjPslLBBnakZ-63Qxz5GcAIXntnr_XtS7Xc,7233
|
| 120 |
+
torchvision/datasets/sun397.py,sha256=BGmuMDZak4qVnRFyHfAAuX-4MqlsdHx6zAd5peD0xxE,2748
|
| 121 |
+
torchvision/datasets/svhn.py,sha256=kIuOppQKsmvc3kVPU8KcEGMWH0TkTHS5mHQeg_8hgvo,4768
|
| 122 |
+
torchvision/datasets/ucf101.py,sha256=M6r2zlajjMDC5uiFeHeZUooQsvpmNG0ah2EjyZ0TjmQ,5472
|
| 123 |
+
torchvision/datasets/usps.py,sha256=slohXYwBzIQtBd3idomN6hSn8U1x3dlB6nmBo_Vw2Ys,3440
|
| 124 |
+
torchvision/datasets/utils.py,sha256=qcNQti5JtJFY1AZgZdVmYFVZ_hrlj2PxX_mtH4z-X4M,18340
|
| 125 |
+
torchvision/datasets/video_utils.py,sha256=FnQBIS7lLZqcQTi7nfiNmWMnR73izKXXYMLKOqJfBOE,17203
|
| 126 |
+
torchvision/datasets/vision.py,sha256=LRazIZi1PtybjnjlV--SjaZ-7OJHDTmW3M2Nh1tV9mM,4206
|
| 127 |
+
torchvision/datasets/voc.py,sha256=t8pBKMh-BuOTELjj8m2biyXhJwVGW6z16BvyjrPg-kM,8760
|
| 128 |
+
torchvision/datasets/widerface.py,sha256=dFcxBKrAyTx5J_mv42amHeuy2dHnFXQmIhE5RWC3EwI,8117
|
| 129 |
+
torchvision/extension.py,sha256=YWBDURfCFXSmRvXi2iEg2L0hafN2-RnybpImh9JAUtQ,3141
|
| 130 |
+
torchvision/image.so,sha256=s5h1kwLvwddiml4Peyv7wWwXPqGu9IHnfmn6Fg6Aysg,347705
|
| 131 |
+
torchvision/io/__init__.py,sha256=jRZ5dfLN6oby_jI2cAGIWl_o4GTO_djxVDIpXFElsVU,1478
|
| 132 |
+
torchvision/io/__pycache__/__init__.cpython-310.pyc,,
|
| 133 |
+
torchvision/io/__pycache__/_load_gpu_decoder.cpython-310.pyc,,
|
| 134 |
+
torchvision/io/__pycache__/_video_opt.cpython-310.pyc,,
|
| 135 |
+
torchvision/io/__pycache__/image.cpython-310.pyc,,
|
| 136 |
+
torchvision/io/__pycache__/video.cpython-310.pyc,,
|
| 137 |
+
torchvision/io/__pycache__/video_reader.cpython-310.pyc,,
|
| 138 |
+
torchvision/io/_load_gpu_decoder.py,sha256=nvR0HG0B2-GEYpFiooPELIOgfL6X3gUetPgFGuH4nWs,174
|
| 139 |
+
torchvision/io/_video_opt.py,sha256=iClXtPsNK4Fph0-Nzx4MzIBbT-rElpZ6JOKBd8X1Vbk,20390
|
| 140 |
+
torchvision/io/image.py,sha256=YOCb9u_yBSA_GP_3sJ8MSpRcGsSd8yKXdlIXnUUX2nA,9878
|
| 141 |
+
torchvision/io/video.py,sha256=89WNezkYxKm59GtX__Ta_aaPukD6p7nm7bkuMiV27WI,15674
|
| 142 |
+
torchvision/io/video_reader.py,sha256=sXB4hzZl1piRDBBnhH6gdMITzOqeSEtcyGvrttI3MJU,11350
|
| 143 |
+
torchvision/models/__init__.py,sha256=A8GQPE1bl3oUHpuD9ND53DV557IPY4459FNLW6sVXGI,865
|
| 144 |
+
torchvision/models/__pycache__/__init__.cpython-310.pyc,,
|
| 145 |
+
torchvision/models/__pycache__/_api.cpython-310.pyc,,
|
| 146 |
+
torchvision/models/__pycache__/_meta.cpython-310.pyc,,
|
| 147 |
+
torchvision/models/__pycache__/_utils.cpython-310.pyc,,
|
| 148 |
+
torchvision/models/__pycache__/alexnet.cpython-310.pyc,,
|
| 149 |
+
torchvision/models/__pycache__/convnext.cpython-310.pyc,,
|
| 150 |
+
torchvision/models/__pycache__/densenet.cpython-310.pyc,,
|
| 151 |
+
torchvision/models/__pycache__/efficientnet.cpython-310.pyc,,
|
| 152 |
+
torchvision/models/__pycache__/feature_extraction.cpython-310.pyc,,
|
| 153 |
+
torchvision/models/__pycache__/googlenet.cpython-310.pyc,,
|
| 154 |
+
torchvision/models/__pycache__/inception.cpython-310.pyc,,
|
| 155 |
+
torchvision/models/__pycache__/maxvit.cpython-310.pyc,,
|
| 156 |
+
torchvision/models/__pycache__/mnasnet.cpython-310.pyc,,
|
| 157 |
+
torchvision/models/__pycache__/mobilenet.cpython-310.pyc,,
|
| 158 |
+
torchvision/models/__pycache__/mobilenetv2.cpython-310.pyc,,
|
| 159 |
+
torchvision/models/__pycache__/mobilenetv3.cpython-310.pyc,,
|
| 160 |
+
torchvision/models/__pycache__/regnet.cpython-310.pyc,,
|
| 161 |
+
torchvision/models/__pycache__/resnet.cpython-310.pyc,,
|
| 162 |
+
torchvision/models/__pycache__/shufflenetv2.cpython-310.pyc,,
|
| 163 |
+
torchvision/models/__pycache__/squeezenet.cpython-310.pyc,,
|
| 164 |
+
torchvision/models/__pycache__/swin_transformer.cpython-310.pyc,,
|
| 165 |
+
torchvision/models/__pycache__/vgg.cpython-310.pyc,,
|
| 166 |
+
torchvision/models/__pycache__/vision_transformer.cpython-310.pyc,,
|
| 167 |
+
torchvision/models/_api.py,sha256=uIIJnxX1zYMNpdvJ0haSq15_XlR1QteFZBYVAdtEheg,10054
|
| 168 |
+
torchvision/models/_meta.py,sha256=fqpeQBsf9EEYbmApQ8Q0LKyM9_UFwjireII5mwDbwJY,28875
|
| 169 |
+
torchvision/models/_utils.py,sha256=S8uDD7maNefy-fEW6mpz8dFU68acK1HxN0kt1qpkkDo,10893
|
| 170 |
+
torchvision/models/alexnet.py,sha256=dvBZLVH60TOTHCNNkWg0TFLtuJ5Ghh_xXN73r3Vyq58,4488
|
| 171 |
+
torchvision/models/convnext.py,sha256=tP73tH-us6h2KSdVcPypEX9Izk5lsr82KsGT15mj4NE,15326
|
| 172 |
+
torchvision/models/densenet.py,sha256=qUhW4pNpZtj8JVkvc2Rjo9svDxL_HMqCqXFWggu9M1o,16804
|
| 173 |
+
torchvision/models/detection/__init__.py,sha256=JwYm_fTGO_FeRg4eTOQLwQPZ9lC9jheZ-QEoJgqKTjg,168
|
| 174 |
+
torchvision/models/detection/__pycache__/__init__.cpython-310.pyc,,
|
| 175 |
+
torchvision/models/detection/__pycache__/_utils.cpython-310.pyc,,
|
| 176 |
+
torchvision/models/detection/__pycache__/anchor_utils.cpython-310.pyc,,
|
| 177 |
+
torchvision/models/detection/__pycache__/backbone_utils.cpython-310.pyc,,
|
| 178 |
+
torchvision/models/detection/__pycache__/faster_rcnn.cpython-310.pyc,,
|
| 179 |
+
torchvision/models/detection/__pycache__/fcos.cpython-310.pyc,,
|
| 180 |
+
torchvision/models/detection/__pycache__/generalized_rcnn.cpython-310.pyc,,
|
| 181 |
+
torchvision/models/detection/__pycache__/image_list.cpython-310.pyc,,
|
| 182 |
+
torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-310.pyc,,
|
| 183 |
+
torchvision/models/detection/__pycache__/mask_rcnn.cpython-310.pyc,,
|
| 184 |
+
torchvision/models/detection/__pycache__/retinanet.cpython-310.pyc,,
|
| 185 |
+
torchvision/models/detection/__pycache__/roi_heads.cpython-310.pyc,,
|
| 186 |
+
torchvision/models/detection/__pycache__/rpn.cpython-310.pyc,,
|
| 187 |
+
torchvision/models/detection/__pycache__/ssd.cpython-310.pyc,,
|
| 188 |
+
torchvision/models/detection/__pycache__/ssdlite.cpython-310.pyc,,
|
| 189 |
+
torchvision/models/detection/__pycache__/transform.cpython-310.pyc,,
|
| 190 |
+
torchvision/models/detection/_utils.py,sha256=2y3FQ4F5yXhFM7VIWmu_70FpKgZjxdT_ucfzYwi3ZUQ,22127
|
| 191 |
+
torchvision/models/detection/anchor_utils.py,sha256=8Ix1Vp3i2kgJGr6esie3rw0_yAjtrUSvLXVKPaoZeQo,11859
|
| 192 |
+
torchvision/models/detection/backbone_utils.py,sha256=4FyzocR6YS7cG5IJTMRwC44tupbXQDA_Ru_8qqaju2I,10548
|
| 193 |
+
torchvision/models/detection/faster_rcnn.py,sha256=OldpozE7KcICV1C70uUHWyTAhr9soAzbT99LSEMtl1g,36767
|
| 194 |
+
torchvision/models/detection/fcos.py,sha256=12mQ37D3hpXQ7uGBGyWMtzwgjJQPu6adwvjzB-wxnw0,33990
|
| 195 |
+
torchvision/models/detection/generalized_rcnn.py,sha256=4-Dp8Vx-SjDDSZ7TsZ11rmkvEH336aLuSOlERXiQ7fs,4743
|
| 196 |
+
torchvision/models/detection/image_list.py,sha256=SUJ3xMn-1xc6ivYZUNIdWBh3RH9xD8EtCdpsXnPI_iM,783
|
| 197 |
+
torchvision/models/detection/keypoint_rcnn.py,sha256=8GDpiBk_Rcg0yfCril5GkpGkA4JN4eWihPjIMsup3GM,21760
|
| 198 |
+
torchvision/models/detection/mask_rcnn.py,sha256=K3oizvu-Na_Et_e6nZx2flbdKFQaeX6WV5XH-zmVlEU,26501
|
| 199 |
+
torchvision/models/detection/retinanet.py,sha256=_31tsj9tjTtrtSY5LUbqHqEXs6vr4RTEi5mR02cGZak,37055
|
| 200 |
+
torchvision/models/detection/roi_heads.py,sha256=Uh9950xZUEmejwD2pRRhKvqNV0bY_G2Om8yGC2EdDDg,33822
|
| 201 |
+
torchvision/models/detection/rpn.py,sha256=_OFaGmf7cXJSkCJX6tX7OROXIYkTNJ52gQlW-Aofdig,15735
|
| 202 |
+
torchvision/models/detection/ssd.py,sha256=tbsgVbRD36WrjkZEB4xi1fvOXT62ry0p8G_Sd-j5CrY,28979
|
| 203 |
+
torchvision/models/detection/ssdlite.py,sha256=HQ8XD36fSId_OiMecjkLxcEtQsS-1VvryMzYc8emhCk,13215
|
| 204 |
+
torchvision/models/detection/transform.py,sha256=Ma0CDvLCMlk3MxS3asXcDxrSosRLacaLpi-T34LXm1A,12189
|
| 205 |
+
torchvision/models/efficientnet.py,sha256=4qyeoXkYGFyUsBDt8TygDYycMMt1zhGwB_l4PmoPv4g,43090
|
| 206 |
+
torchvision/models/feature_extraction.py,sha256=UCoGdvS8_tnuIZp9-YE5atrn96Gk6mZSWAYPti_nnCg,25577
|
| 207 |
+
torchvision/models/googlenet.py,sha256=ni7VlSJW2_zG0Adxx56fuN5t4yI6vROBAuAu06-V4f0,12806
|
| 208 |
+
torchvision/models/inception.py,sha256=ifrLErzOVG-vlwQOMXLX5yMgcpHxCQQ17L7Wacn5QhQ,18851
|
| 209 |
+
torchvision/models/maxvit.py,sha256=CK7u0cZnclBHDaLn7cwEonDgD1o6gtcQIYZxw3nh2rs,31953
|
| 210 |
+
torchvision/models/mnasnet.py,sha256=h9jY1TupaChZj9khnXya_l4O1exUWhWOOCmhJCCImKc,17574
|
| 211 |
+
torchvision/models/mobilenet.py,sha256=lSRVxw2TL3LFBwCadvyvH6n3GzqUTnK2-rhX3MOgSrs,211
|
| 212 |
+
torchvision/models/mobilenetv2.py,sha256=v9cRBAp7_C_50JFkjGZ0luvuh45oCYgYn37pcG2UL8o,9710
|
| 213 |
+
torchvision/models/mobilenetv3.py,sha256=-Xk32m_Wdn-ap8wCL4Tl7wjiROIwDwhasInYTMwwOrE,16279
|
| 214 |
+
torchvision/models/optical_flow/__init__.py,sha256=0zRlMWQJCjFqoUafUXVgO89-z7em7tACo9E8hHSq9RQ,20
|
| 215 |
+
torchvision/models/optical_flow/__pycache__/__init__.cpython-310.pyc,,
|
| 216 |
+
torchvision/models/optical_flow/__pycache__/_utils.cpython-310.pyc,,
|
| 217 |
+
torchvision/models/optical_flow/__pycache__/raft.cpython-310.pyc,,
|
| 218 |
+
torchvision/models/optical_flow/_utils.py,sha256=v-tQJzYmYukrD1sQAE-5j5jxyvComwF1UdGkz5tVTLw,2077
|
| 219 |
+
torchvision/models/optical_flow/raft.py,sha256=FpSLPXisugu5Rzp_D5XCr037snBapMJ0dDPrw9c3CNk,39995
|
| 220 |
+
torchvision/models/quantization/__init__.py,sha256=gqFM7zI4UUHKKBDJAumozOn7xPL0JtvyNS8Ejz6QXp0,125
|
| 221 |
+
torchvision/models/quantization/__pycache__/__init__.cpython-310.pyc,,
|
| 222 |
+
torchvision/models/quantization/__pycache__/googlenet.cpython-310.pyc,,
|
| 223 |
+
torchvision/models/quantization/__pycache__/inception.cpython-310.pyc,,
|
| 224 |
+
torchvision/models/quantization/__pycache__/mobilenet.cpython-310.pyc,,
|
| 225 |
+
torchvision/models/quantization/__pycache__/mobilenetv2.cpython-310.pyc,,
|
| 226 |
+
torchvision/models/quantization/__pycache__/mobilenetv3.cpython-310.pyc,,
|
| 227 |
+
torchvision/models/quantization/__pycache__/resnet.cpython-310.pyc,,
|
| 228 |
+
torchvision/models/quantization/__pycache__/shufflenetv2.cpython-310.pyc,,
|
| 229 |
+
torchvision/models/quantization/__pycache__/utils.cpython-310.pyc,,
|
| 230 |
+
torchvision/models/quantization/googlenet.py,sha256=C-8lm9TnjkEuwu6zaPp0r5mb0QMYvTMGOtz2--s1IFo,8080
|
| 231 |
+
torchvision/models/quantization/inception.py,sha256=hg8K1QNk7T-Qo3zOB47eupS3Thu_RjVI6mG2HzAEx8M,10815
|
| 232 |
+
torchvision/models/quantization/mobilenet.py,sha256=lSRVxw2TL3LFBwCadvyvH6n3GzqUTnK2-rhX3MOgSrs,211
|
| 233 |
+
torchvision/models/quantization/mobilenetv2.py,sha256=ggpNLU4_JkyMn8IPTgj1p0xx_Wvspcii2Wd3ISj5tBE,5883
|
| 234 |
+
torchvision/models/quantization/mobilenetv3.py,sha256=PVWmSP62Pn8hQkd682l6uYFLQp1nxZltMOE-FhhO9OU,9230
|
| 235 |
+
torchvision/models/quantization/resnet.py,sha256=9Hb6KyPv33Jj1A6JciXvGX06q0RkwwP10u8GxFfmorM,17939
|
| 236 |
+
torchvision/models/quantization/shufflenetv2.py,sha256=eS2y34ZTG03dNJgtVJ2qSXQWZ22PHIWBYeC8cbvI1yI,16884
|
| 237 |
+
torchvision/models/quantization/utils.py,sha256=n8mWsK9_Ek_M2AqGKPLoLlcKaYGH2PrF2l5_W84oBMk,2058
|
| 238 |
+
torchvision/models/regnet.py,sha256=-7s5n0qzXZPR9HgzOk9aj1sv9dWZ3AxnP7CmZRdUeZI,63553
|
| 239 |
+
torchvision/models/resnet.py,sha256=dJmlBZrXsaH491Q8BLShN5UUD62DfDhTC0j_XZYQv24,38932
|
| 240 |
+
torchvision/models/segmentation/__init__.py,sha256=TGk6UdVXAMtwBpYalrvdXZnmSwqzTDOT1lgKrfzhHrQ,66
|
| 241 |
+
torchvision/models/segmentation/__pycache__/__init__.cpython-310.pyc,,
|
| 242 |
+
torchvision/models/segmentation/__pycache__/_utils.cpython-310.pyc,,
|
| 243 |
+
torchvision/models/segmentation/__pycache__/deeplabv3.cpython-310.pyc,,
|
| 244 |
+
torchvision/models/segmentation/__pycache__/fcn.cpython-310.pyc,,
|
| 245 |
+
torchvision/models/segmentation/__pycache__/lraspp.cpython-310.pyc,,
|
| 246 |
+
torchvision/models/segmentation/_utils.py,sha256=QfyqCtH_MJnIkKW5m-98GZD2MjtPYLtPTDi79pcIGhs,1197
|
| 247 |
+
torchvision/models/segmentation/deeplabv3.py,sha256=wVgXz21sugSck2KbG7WD-wgMwCAW0wd8jBGhgue300s,15015
|
| 248 |
+
torchvision/models/segmentation/fcn.py,sha256=I1FqaZZVPc3Fbg_7E2L5qpumnupxBYc7KYsW03EG_Cs,8973
|
| 249 |
+
torchvision/models/segmentation/lraspp.py,sha256=dt5DJ_qbDZlEM0SIuN87JU43JHfVlb8Oepp76KDv5tw,7643
|
| 250 |
+
torchvision/models/shufflenetv2.py,sha256=84FiPfkhJpSw6Q9Jmaug5MW5qmWCO3VhAPF61EiMn7Q,15444
|
| 251 |
+
torchvision/models/squeezenet.py,sha256=apjFPEI5nr_493bAQsR245EorzaMYXVQSqdcveyAfy0,8763
|
| 252 |
+
torchvision/models/swin_transformer.py,sha256=VwvnImWcjblashj0OONycDJnIkz-zRDpm365v_a0-zo,39337
|
| 253 |
+
torchvision/models/vgg.py,sha256=jYjIoY2jtKAc-aURCQsvbgBxup1Gh4fVZSt2NzFLlZY,19225
|
| 254 |
+
torchvision/models/video/__init__.py,sha256=O4HB-RaXgCtnvpMDAuMBaIeKIiYEkNxra_fmAHLUIJM,93
|
| 255 |
+
torchvision/models/video/__pycache__/__init__.cpython-310.pyc,,
|
| 256 |
+
torchvision/models/video/__pycache__/mvit.cpython-310.pyc,,
|
| 257 |
+
torchvision/models/video/__pycache__/resnet.cpython-310.pyc,,
|
| 258 |
+
torchvision/models/video/__pycache__/s3d.cpython-310.pyc,,
|
| 259 |
+
torchvision/models/video/__pycache__/swin_transformer.cpython-310.pyc,,
|
| 260 |
+
torchvision/models/video/mvit.py,sha256=0AZ31K5QcUBWZUUPTI1FCCM2Fma95bPs1o82zzpw2i0,32998
|
| 261 |
+
torchvision/models/video/resnet.py,sha256=RUnbUXFmoWNo_XbEKLmVSM8LUDcyv6jGZJ8GGpZi_6U,16771
|
| 262 |
+
torchvision/models/video/s3d.py,sha256=jx9gMP18Bzb7UO3vjejVBHlrCrJPdWFDfTn7XeU5kMg,7815
|
| 263 |
+
torchvision/models/video/swin_transformer.py,sha256=3GMyPGPeMcwJ1p9TGiRbpIlP-G7Qv_jWNbZmqIwMNyA,27688
|
| 264 |
+
torchvision/models/vision_transformer.py,sha256=O4mdBjYFsp-HTZA9bXfux_wJzIPRv1uS43PjuNh52zc,32136
|
| 265 |
+
torchvision/ops/__init__.py,sha256=eVv16QSBwgKaojOUHMPCy4ou9ZeFh-HoCV4DpqrZG4U,1928
|
| 266 |
+
torchvision/ops/__pycache__/__init__.cpython-310.pyc,,
|
| 267 |
+
torchvision/ops/__pycache__/_box_convert.cpython-310.pyc,,
|
| 268 |
+
torchvision/ops/__pycache__/_register_onnx_ops.cpython-310.pyc,,
|
| 269 |
+
torchvision/ops/__pycache__/_utils.cpython-310.pyc,,
|
| 270 |
+
torchvision/ops/__pycache__/boxes.cpython-310.pyc,,
|
| 271 |
+
torchvision/ops/__pycache__/ciou_loss.cpython-310.pyc,,
|
| 272 |
+
torchvision/ops/__pycache__/deform_conv.cpython-310.pyc,,
|
| 273 |
+
torchvision/ops/__pycache__/diou_loss.cpython-310.pyc,,
|
| 274 |
+
torchvision/ops/__pycache__/drop_block.cpython-310.pyc,,
|
| 275 |
+
torchvision/ops/__pycache__/feature_pyramid_network.cpython-310.pyc,,
|
| 276 |
+
torchvision/ops/__pycache__/focal_loss.cpython-310.pyc,,
|
| 277 |
+
torchvision/ops/__pycache__/giou_loss.cpython-310.pyc,,
|
| 278 |
+
torchvision/ops/__pycache__/misc.cpython-310.pyc,,
|
| 279 |
+
torchvision/ops/__pycache__/poolers.cpython-310.pyc,,
|
| 280 |
+
torchvision/ops/__pycache__/ps_roi_align.cpython-310.pyc,,
|
| 281 |
+
torchvision/ops/__pycache__/ps_roi_pool.cpython-310.pyc,,
|
| 282 |
+
torchvision/ops/__pycache__/roi_align.cpython-310.pyc,,
|
| 283 |
+
torchvision/ops/__pycache__/roi_pool.cpython-310.pyc,,
|
| 284 |
+
torchvision/ops/__pycache__/stochastic_depth.cpython-310.pyc,,
|
| 285 |
+
torchvision/ops/_box_convert.py,sha256=_bRRpErwk03rcPuscO1tCI9v3l88oNlDBDl2jzPlbKo,2409
|
| 286 |
+
torchvision/ops/_register_onnx_ops.py,sha256=Fyb1kC2m2OqZdfW_M86pt9-S66e1qNUhXNu1EQRa034,4181
|
| 287 |
+
torchvision/ops/_utils.py,sha256=pVHPpsmx6XcfGjUVk-XAEnd8QJBkrw_cT6fO_IwICE4,3630
|
| 288 |
+
torchvision/ops/boxes.py,sha256=_IpyT5nPIt0E9byfI4rjs8aXl-UQNGngLZZYiDYwqEg,15495
|
| 289 |
+
torchvision/ops/ciou_loss.py,sha256=3HClrMMKOJ3bndIUinNp3cp6Cim4-ZmmfuLn1-NPDUo,2756
|
| 290 |
+
torchvision/ops/deform_conv.py,sha256=fJxkVR_p_OQMzMja4flvmTgqDPvrOOcwzDG8bV7Q7pE,6990
|
| 291 |
+
torchvision/ops/diou_loss.py,sha256=tssNJhII4WT-wmidFS8gFNteQIAJz-Nd1Q7Trz1BjIY,3362
|
| 292 |
+
torchvision/ops/drop_block.py,sha256=A4EGIl7txrU_QmkI1N0W9hfd8tq8yx6zq32oYXaddLQ,5855
|
| 293 |
+
torchvision/ops/feature_pyramid_network.py,sha256=mfkaygxRz-0TAdTMq2fCAL-E0WxlRnTfdb-s_J5qPE4,8702
|
| 294 |
+
torchvision/ops/focal_loss.py,sha256=9kFqGyA0-hodRw9Au74k-FuS14OhsAvbFxDGvpx08Sg,2261
|
| 295 |
+
torchvision/ops/giou_loss.py,sha256=OXSaMZDZ0qy7jgaQ9exB_DMQXzcATBAFiIjzSlOV-bQ,2696
|
| 296 |
+
torchvision/ops/misc.py,sha256=yFnK7GT9OCMfDrn4NtQXKdh5broi1xocL94SoyqhWuw,13572
|
| 297 |
+
torchvision/ops/poolers.py,sha256=zzYhH7poMwGlYxDvAvCaL9emg9X7sM4xZFLEy0zvv5s,11920
|
| 298 |
+
torchvision/ops/ps_roi_align.py,sha256=4iAbeUVTessAcxvJhuARN_aFGUTZC9R4KrKC_mBH3MQ,3625
|
| 299 |
+
torchvision/ops/ps_roi_pool.py,sha256=jOv-2pAZdLFvvt4r4NwiRfxU5WAOy_vi6gxZjMvlusw,2870
|
| 300 |
+
torchvision/ops/roi_align.py,sha256=wKwVi4cMUKQjvmJlnSYHOTrZX081YfCzxOjKOuXqL1M,10756
|
| 301 |
+
torchvision/ops/roi_pool.py,sha256=70ou6Xc7qJxKe3SC54QIW3L99PoS0gLlwGocaYDbD2w,2943
|
| 302 |
+
torchvision/ops/stochastic_depth.py,sha256=ISZ9noJyZLxpTG-wa2VmPs66qjhVsP7ZxWHvumWSP3U,2236
|
| 303 |
+
torchvision/transforms/__init__.py,sha256=EMft42B1JAiU11J1rxIN4Znis6EJPbp-bsGjAzH-24M,53
|
| 304 |
+
torchvision/transforms/__pycache__/__init__.cpython-310.pyc,,
|
| 305 |
+
torchvision/transforms/__pycache__/_functional_pil.cpython-310.pyc,,
|
| 306 |
+
torchvision/transforms/__pycache__/_functional_tensor.cpython-310.pyc,,
|
| 307 |
+
torchvision/transforms/__pycache__/_functional_video.cpython-310.pyc,,
|
| 308 |
+
torchvision/transforms/__pycache__/_presets.cpython-310.pyc,,
|
| 309 |
+
torchvision/transforms/__pycache__/_transforms_video.cpython-310.pyc,,
|
| 310 |
+
torchvision/transforms/__pycache__/autoaugment.cpython-310.pyc,,
|
| 311 |
+
torchvision/transforms/__pycache__/functional.cpython-310.pyc,,
|
| 312 |
+
torchvision/transforms/__pycache__/transforms.cpython-310.pyc,,
|
| 313 |
+
torchvision/transforms/_functional_pil.py,sha256=nmvbsk0KIjKDZ1iSPwiuFHNWbGvMmTeYdeoHhYXPolM,12112
|
| 314 |
+
torchvision/transforms/_functional_tensor.py,sha256=Bq_uSppeO2dT88xa-XncQJ_pWXY8t97bLGMryMIai8Q,33834
|
| 315 |
+
torchvision/transforms/_functional_video.py,sha256=YcV557YglbJsq9SRGJHFoRbtxawiLSJ1oM5rV75OyqQ,3857
|
| 316 |
+
torchvision/transforms/_presets.py,sha256=rw5UzlFDIgRnpak51szLGQaLs5gpMm6ANv6bpuwuHgs,8484
|
| 317 |
+
torchvision/transforms/_transforms_video.py,sha256=Buz5LCWVPGiEonHE-cXIXfbkBhNc0qxVraxkNdxKp8o,4950
|
| 318 |
+
torchvision/transforms/autoaugment.py,sha256=JcbdEDbR0-OqTE4cwkhVSB45woFZQ_Fq5xmjFu_3bjg,28243
|
| 319 |
+
torchvision/transforms/functional.py,sha256=M_Bg_G-XNF60UUpebbuuMA5J4o4XaV5oRckKuP9MhjI,66890
|
| 320 |
+
torchvision/transforms/transforms.py,sha256=HFxc7sIlSMHxaWRIbhb1JB-MqX8z47o0WuXBp4YL-Ro,85557
|
| 321 |
+
torchvision/transforms/v2/__init__.py,sha256=9HBFpAWEINr9uA25guqjOKIVz5fHcqriu7BCp1hkHV0,1419
|
| 322 |
+
torchvision/transforms/v2/__pycache__/__init__.cpython-310.pyc,,
|
| 323 |
+
torchvision/transforms/v2/__pycache__/_augment.cpython-310.pyc,,
|
| 324 |
+
torchvision/transforms/v2/__pycache__/_auto_augment.cpython-310.pyc,,
|
| 325 |
+
torchvision/transforms/v2/__pycache__/_color.cpython-310.pyc,,
|
| 326 |
+
torchvision/transforms/v2/__pycache__/_container.cpython-310.pyc,,
|
| 327 |
+
torchvision/transforms/v2/__pycache__/_deprecated.cpython-310.pyc,,
|
| 328 |
+
torchvision/transforms/v2/__pycache__/_geometry.cpython-310.pyc,,
|
| 329 |
+
torchvision/transforms/v2/__pycache__/_meta.cpython-310.pyc,,
|
| 330 |
+
torchvision/transforms/v2/__pycache__/_misc.cpython-310.pyc,,
|
| 331 |
+
torchvision/transforms/v2/__pycache__/_temporal.cpython-310.pyc,,
|
| 332 |
+
torchvision/transforms/v2/__pycache__/_transform.cpython-310.pyc,,
|
| 333 |
+
torchvision/transforms/v2/__pycache__/_type_conversion.cpython-310.pyc,,
|
| 334 |
+
torchvision/transforms/v2/__pycache__/_utils.cpython-310.pyc,,
|
| 335 |
+
torchvision/transforms/v2/_augment.py,sha256=tdewPc97m2cHlXeqLBhUh-7zSf3alGV1FdECKCVTz0k,13796
|
| 336 |
+
torchvision/transforms/v2/_auto_augment.py,sha256=yKbEyFYI6mU9GVbyHROxr4lYKRDOg0fBoeSxqydpt_8,31780
|
| 337 |
+
torchvision/transforms/v2/_color.py,sha256=5d5rO3N7zN6EPRBG_m4vQDsmM5HD1KGy7O5Y94GP9RQ,16550
|
| 338 |
+
torchvision/transforms/v2/_container.py,sha256=SFh-FU8ceir934hxS_VkbVQq0SxzGSULPaYpouJJhPs,6055
|
| 339 |
+
torchvision/transforms/v2/_deprecated.py,sha256=a4ZPqNkZLd8yjDoCsJOn8dHPa5TfIkxKrE2IMetSviU,1837
|
| 340 |
+
torchvision/transforms/v2/_geometry.py,sha256=PzSUAhbxx3QSMIOdDbUUpro2Tb3qbvWDFyP8_6GfeD0,66930
|
| 341 |
+
torchvision/transforms/v2/_meta.py,sha256=yGpK7GsIdJ9Ri1Ds83h7kOWfZEOHGqGlAp5Tfyq4WjY,1489
|
| 342 |
+
torchvision/transforms/v2/_misc.py,sha256=YWFbbDiMti4cxMGA2eF2RyPDVjT47QiQOEAlCe0VH4s,17125
|
| 343 |
+
torchvision/transforms/v2/_temporal.py,sha256=ByHqYqy1KO1Rd-Cg-eynHQEnF4y7OaMGIeO44kl8QJw,906
|
| 344 |
+
torchvision/transforms/v2/_transform.py,sha256=008PBMswQWIc7dEmhWqm772_O4ciDY3rycGu08nhcME,8476
|
| 345 |
+
torchvision/transforms/v2/_type_conversion.py,sha256=f3J1wYeB_zTaF8mxIjoudDKCiljmWqLGszSS9DN5EsQ,2860
|
| 346 |
+
torchvision/transforms/v2/_utils.py,sha256=KSkGow8EwtP4OMwdtd6En1b08EA-PTKVZH36FV7IUSQ,8706
|
| 347 |
+
torchvision/transforms/v2/functional/__init__.py,sha256=QROAo8DCNo5i3Kp1XKuf1U0k6ThVRq93Z5Dwf40ptUI,4217
|
| 348 |
+
torchvision/transforms/v2/functional/__pycache__/__init__.cpython-310.pyc,,
|
| 349 |
+
torchvision/transforms/v2/functional/__pycache__/_augment.cpython-310.pyc,,
|
| 350 |
+
torchvision/transforms/v2/functional/__pycache__/_color.cpython-310.pyc,,
|
| 351 |
+
torchvision/transforms/v2/functional/__pycache__/_deprecated.cpython-310.pyc,,
|
| 352 |
+
torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-310.pyc,,
|
| 353 |
+
torchvision/transforms/v2/functional/__pycache__/_meta.cpython-310.pyc,,
|
| 354 |
+
torchvision/transforms/v2/functional/__pycache__/_misc.cpython-310.pyc,,
|
| 355 |
+
torchvision/transforms/v2/functional/__pycache__/_temporal.cpython-310.pyc,,
|
| 356 |
+
torchvision/transforms/v2/functional/__pycache__/_type_conversion.cpython-310.pyc,,
|
| 357 |
+
torchvision/transforms/v2/functional/__pycache__/_utils.cpython-310.pyc,,
|
| 358 |
+
torchvision/transforms/v2/functional/_augment.py,sha256=S4ZHPCL52aJPz1QS5RHZhUH59MrX73Motn6J0M_8VGU,1681
|
| 359 |
+
torchvision/transforms/v2/functional/_color.py,sha256=GDq4iXEsvURWVasGOhgkf_LewINGQ43BH5feDdomI3I,28982
|
| 360 |
+
torchvision/transforms/v2/functional/_deprecated.py,sha256=ycYZLDwDyd612aPbTKIV3gqhCRLMdF03MQELct4LeGs,801
|
| 361 |
+
torchvision/transforms/v2/functional/_geometry.py,sha256=X4Y5hWuqI9ULAAq5U32ie16Lg4XPrbzp-zTRT2ICsyM,85714
|
| 362 |
+
torchvision/transforms/v2/functional/_meta.py,sha256=zAAb2k1iUA9-OjktIdRZ01FtDKsH-hzc_4Q4_G3eZto,10356
|
| 363 |
+
torchvision/transforms/v2/functional/_misc.py,sha256=JunoMZBMHZ0XWklbwbipcpLUcFN8rjEMtUg0-db5MMQ,10706
|
| 364 |
+
torchvision/transforms/v2/functional/_temporal.py,sha256=24CQCXXO12TnW7aUiUQdrk5DRSpTPONjjC4jaGh3lH4,1136
|
| 365 |
+
torchvision/transforms/v2/functional/_type_conversion.py,sha256=V6R0zpykrTBXGwCZwg6053QRmgCATJlGUXWA5RjfyGo,854
|
| 366 |
+
torchvision/transforms/v2/functional/_utils.py,sha256=tsmwIF37Z9QnP9x3x4hAs1hLrcvL78GLkuO6Rq1EUTk,5479
|
| 367 |
+
torchvision/tv_tensors/__init__.py,sha256=C6N8p5aulpehsOBBmH1cPIY1xiOSASZVBfnlXgGvR_s,1509
|
| 368 |
+
torchvision/tv_tensors/__pycache__/__init__.cpython-310.pyc,,
|
| 369 |
+
torchvision/tv_tensors/__pycache__/_bounding_boxes.cpython-310.pyc,,
|
| 370 |
+
torchvision/tv_tensors/__pycache__/_dataset_wrapper.cpython-310.pyc,,
|
| 371 |
+
torchvision/tv_tensors/__pycache__/_image.cpython-310.pyc,,
|
| 372 |
+
torchvision/tv_tensors/__pycache__/_mask.cpython-310.pyc,,
|
| 373 |
+
torchvision/tv_tensors/__pycache__/_torch_function_helpers.cpython-310.pyc,,
|
| 374 |
+
torchvision/tv_tensors/__pycache__/_tv_tensor.cpython-310.pyc,,
|
| 375 |
+
torchvision/tv_tensors/__pycache__/_video.cpython-310.pyc,,
|
| 376 |
+
torchvision/tv_tensors/_bounding_boxes.py,sha256=R7qoG46pnmhGnhYfCOGVC5lMgeJs54p32GmjUVxAMNw,4471
|
| 377 |
+
torchvision/tv_tensors/_dataset_wrapper.py,sha256=cNE2GOuHquMfA2WD41m6wT-gfoaIxOyhTQIEMo5TKEo,24215
|
| 378 |
+
torchvision/tv_tensors/_image.py,sha256=OQIp2X_iYYIktxC8XjAFew-8NIgYqIRBBoVuFHelWVc,1904
|
| 379 |
+
torchvision/tv_tensors/_mask.py,sha256=-mN34OF6j-BYrW4B9ZA8fiWfB2ZzBBJFpGvryRFRDj0,1451
|
| 380 |
+
torchvision/tv_tensors/_torch_function_helpers.py,sha256=81qDZqgzUeSgfSeWhsrw1Ukwltvf97WbwmKWHm7X8X0,2276
|
| 381 |
+
torchvision/tv_tensors/_tv_tensor.py,sha256=dGQJhvOVTjb1LVT5qPZLJxox30uDMmODB26Iz6TjVbc,6248
|
| 382 |
+
torchvision/tv_tensors/_video.py,sha256=qSKu-ZQsXbJEPXIob5bxaGFM76nhypNFDVumO0x6wkA,1383
|
| 383 |
+
torchvision/utils.py,sha256=fwpoqLk5EIvN8h91kkzg2IiOD_8F3w11L0YZTTX8XAo,23512
|
| 384 |
+
torchvision/version.py,sha256=BLVkvW50Esh6znE4f_U852clr82aYSQMDWrQMbftj6U,203
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.37.1)
|
| 3 |
+
Root-Is-Purelib: false
|
| 4 |
+
Tag: cp310-cp310-linux_x86_64
|
| 5 |
+
|
evalkit_cambrian/lib/python3.10/site-packages/torchvision-0.17.0+cu118.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
torchvision
|
evalkit_cambrian/lib/python3.10/site-packages/triton/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/__pycache__/testing.cpython-310.pyc
ADDED
|
Binary file (13.7 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.76 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (58.9 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/math.cpython-310.pyc
ADDED
|
Binary file (33.7 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/random.cpython-310.pyc
ADDED
|
Binary file (5.35 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/semantic.cpython-310.pyc
ADDED
|
Binary file (41.8 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/__pycache__/standard.cpython-310.pyc
ADDED
|
Binary file (9.63 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import cuda
|
| 2 |
+
|
| 3 |
+
__all__ = ['cuda']
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (234 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/__pycache__/cuda.cpython-310.pyc
ADDED
|
Binary file (795 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/extra/cuda.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .. import core
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
@core.extern
|
| 5 |
+
def globaltimer(_builder=None):
|
| 6 |
+
return core.inline_asm_elementwise("mov.u64 $0, %globaltimer;", "=l", [], dtype=core.int64, is_pure=False, pack=1,
|
| 7 |
+
_builder=_builder)
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@core.extern
|
| 11 |
+
def smid(_builder=None):
|
| 12 |
+
return core.inline_asm_elementwise("mov.u32 $0, %smid;", "=r", [], dtype=core.int32, is_pure=True, pack=1,
|
| 13 |
+
_builder=_builder)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@core.builtin
|
| 17 |
+
def num_threads(_builder=None):
|
| 18 |
+
return core.constexpr(_builder.target.num_warps * 32)
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/semantic.py
ADDED
|
@@ -0,0 +1,1565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations # remove after python 3.11
|
| 2 |
+
|
| 3 |
+
from functools import wraps
|
| 4 |
+
from typing import List, Optional, Sequence, Tuple, TypeVar
|
| 5 |
+
|
| 6 |
+
from .._C.libtriton.triton import ir
|
| 7 |
+
from ..common.build import is_hip
|
| 8 |
+
from . import core as tl
|
| 9 |
+
|
| 10 |
+
T = TypeVar('T')
|
| 11 |
+
|
| 12 |
+
# TODO: redundant code -- remove after 3P backend refactor
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _is_cuda(target):
|
| 16 |
+
from ..compiler.compiler import CudaTargetDescriptor
|
| 17 |
+
return isinstance(target, CudaTargetDescriptor)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Create custom exception that prints message "hello"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class IncompatibleTypeErrorImpl(Exception):
|
| 24 |
+
|
| 25 |
+
def __init__(self, type_a, type_b):
|
| 26 |
+
self.type_a = type_a
|
| 27 |
+
self.type_b = type_b
|
| 28 |
+
self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__()
|
| 29 |
+
super(IncompatibleTypeErrorImpl, self).__init__(self.message)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# ===----------------------------------------------------------------------===##
|
| 33 |
+
# Programming Model
|
| 34 |
+
# ===----------------------------------------------------------------------===##
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def program_id(axis: int, builder: ir.builder) -> tl.tensor:
|
| 38 |
+
if axis not in (0, 1, 2):
|
| 39 |
+
raise ValueError(f"program_id axis must be 0, 1, or 2 but got {axis}")
|
| 40 |
+
return tl.tensor(builder.create_get_program_id(axis), tl.int32)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def num_programs(axis: int, builder: ir.builder) -> tl.tensor:
|
| 44 |
+
if axis not in (0, 1, 2):
|
| 45 |
+
raise ValueError(f"num_programs axis must be 0, 1, or 2 but got {axis}")
|
| 46 |
+
return tl.tensor(builder.create_get_num_programs(axis), tl.int32)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# ===----------------------------------------------------------------------===//
|
| 50 |
+
# Implicit Casting Utilities
|
| 51 |
+
# ===----------------------------------------------------------------------===//
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype:
|
| 55 |
+
a_rank = a_ty.int_bitwidth
|
| 56 |
+
b_rank = b_ty.int_bitwidth
|
| 57 |
+
a_sn = a_ty.int_signedness
|
| 58 |
+
b_sn = b_ty.int_signedness
|
| 59 |
+
# Rules for signedness taken from "Usual arithmetic conversions" on
|
| 60 |
+
# https://en.cppreference.com/w/c/language/conversion.
|
| 61 |
+
if a_sn == b_sn:
|
| 62 |
+
return a_ty if a_rank > b_rank else b_ty
|
| 63 |
+
elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED:
|
| 64 |
+
return a_ty if a_rank >= b_rank else b_ty
|
| 65 |
+
elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED:
|
| 66 |
+
return b_ty if b_rank >= a_rank else a_ty
|
| 67 |
+
assert False
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype:
|
| 71 |
+
# 1) if one operand is double, the other is implicitly
|
| 72 |
+
# converted to double
|
| 73 |
+
if a_ty.is_fp64() or b_ty.is_fp64():
|
| 74 |
+
return tl.float64
|
| 75 |
+
# 2) if one operand is float, the other is implicitly
|
| 76 |
+
# converted to float
|
| 77 |
+
if a_ty.is_fp32() or b_ty.is_fp32():
|
| 78 |
+
return tl.float32
|
| 79 |
+
# 3 ) if one operand is half, the other is implicitly converted to half
|
| 80 |
+
# unless we're doing / or %, which do not exist natively in PTX for fp16.
|
| 81 |
+
# Supported PTX op: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp
|
| 82 |
+
if a_ty.is_fp16() or b_ty.is_fp16():
|
| 83 |
+
if div_or_mod:
|
| 84 |
+
return tl.float32
|
| 85 |
+
else:
|
| 86 |
+
return tl.float16
|
| 87 |
+
# 4) return bf16 only if both operands are of bf16
|
| 88 |
+
if a_ty.is_bf16() or b_ty.is_bf16():
|
| 89 |
+
if div_or_mod:
|
| 90 |
+
return tl.float32
|
| 91 |
+
if a_ty.is_bf16() and b_ty.is_bf16():
|
| 92 |
+
return tl.bfloat16
|
| 93 |
+
return tl.float32
|
| 94 |
+
if not a_ty.is_int() or not b_ty.is_int():
|
| 95 |
+
assert False
|
| 96 |
+
# 5 ) both operands are integer and undergo
|
| 97 |
+
# integer promotion
|
| 98 |
+
if div_or_mod and a_ty.int_signedness != b_ty.int_signedness:
|
| 99 |
+
raise ValueError("Cannot use /, #, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() +
|
| 100 |
+
" because they have different signedness;"
|
| 101 |
+
"this is unlikely to result in a useful answer. Cast them to the same signedness.")
|
| 102 |
+
return integer_promote_impl(a_ty, b_ty)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# ===----------------------------------------------------------------------===//
|
| 106 |
+
# Binary Operators
|
| 107 |
+
# ===----------------------------------------------------------------------===//
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None:
|
| 111 |
+
if type_a.is_ptr():
|
| 112 |
+
if not allow_ptr_a:
|
| 113 |
+
raise IncompatibleTypeErrorImpl(type_a, type_b)
|
| 114 |
+
# T* + U* with T != U
|
| 115 |
+
if type_b.is_ptr() and (type_a != type_b):
|
| 116 |
+
raise IncompatibleTypeErrorImpl(type_a, type_b)
|
| 117 |
+
# T* + float
|
| 118 |
+
if type_b.is_floating():
|
| 119 |
+
raise IncompatibleTypeErrorImpl(type_a, type_b)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def binary_op_type_checking_impl(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder, allow_lhs_ptr=False,
|
| 123 |
+
allow_rhs_ptr=False, arithmetic_check=True,
|
| 124 |
+
div_or_mod=False) -> Tuple[tl.tensor, tl.tensor]:
|
| 125 |
+
# implicit broadcasting
|
| 126 |
+
lhs, rhs = broadcast_impl_value(lhs, rhs, builder)
|
| 127 |
+
# implicit typecasting
|
| 128 |
+
lhs_sca_ty = lhs.type.scalar
|
| 129 |
+
rhs_sca_ty = rhs.type.scalar
|
| 130 |
+
check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr)
|
| 131 |
+
check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr)
|
| 132 |
+
if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr():
|
| 133 |
+
ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod)
|
| 134 |
+
lhs = cast(lhs, ret_sca_ty, builder)
|
| 135 |
+
rhs = cast(rhs, ret_sca_ty, builder)
|
| 136 |
+
return lhs, rhs
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def add(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 140 |
+
input, other = binary_op_type_checking_impl(input, other, builder, True, True)
|
| 141 |
+
input_scalar_ty = input.type.scalar
|
| 142 |
+
other_scalar_ty = other.type.scalar
|
| 143 |
+
if input_scalar_ty.is_ptr() and other_scalar_ty.is_ptr():
|
| 144 |
+
raise ValueError("cannot add pointers together")
|
| 145 |
+
|
| 146 |
+
# offset + ptr
|
| 147 |
+
# ptr + offset
|
| 148 |
+
if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr():
|
| 149 |
+
input, other = other, input
|
| 150 |
+
input_scalar_ty = input.type.scalar
|
| 151 |
+
other_scalar_ty = other.type.scalar
|
| 152 |
+
if input_scalar_ty.is_ptr():
|
| 153 |
+
return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type)
|
| 154 |
+
# float + float
|
| 155 |
+
elif input_scalar_ty.is_floating():
|
| 156 |
+
return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type)
|
| 157 |
+
# int + int
|
| 158 |
+
elif input_scalar_ty.is_int():
|
| 159 |
+
return tl.tensor(builder.create_add(input.handle, other.handle), input.type)
|
| 160 |
+
assert False
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def sub(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 164 |
+
input, other = binary_op_type_checking_impl(input, other, builder, True, False)
|
| 165 |
+
scalar_ty = input.type.scalar
|
| 166 |
+
# ptr - offset
|
| 167 |
+
if scalar_ty.is_ptr():
|
| 168 |
+
return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type)
|
| 169 |
+
# float - float
|
| 170 |
+
if scalar_ty.is_floating():
|
| 171 |
+
return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type)
|
| 172 |
+
# int - int
|
| 173 |
+
elif scalar_ty.is_int():
|
| 174 |
+
return tl.tensor(builder.create_sub(input.handle, other.handle), input.type)
|
| 175 |
+
assert False
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def mul(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 179 |
+
input, other = binary_op_type_checking_impl(input, other, builder)
|
| 180 |
+
scalar_ty = input.type.scalar
|
| 181 |
+
# float * float
|
| 182 |
+
if scalar_ty.is_floating():
|
| 183 |
+
return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type)
|
| 184 |
+
# * int
|
| 185 |
+
elif scalar_ty.is_int():
|
| 186 |
+
return tl.tensor(builder.create_mul(input.handle, other.handle), input.type)
|
| 187 |
+
assert False
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def truediv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 191 |
+
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
|
| 192 |
+
input_scalar_ty = input.type.scalar
|
| 193 |
+
other_scalar_ty = other.type.scalar
|
| 194 |
+
# float / int
|
| 195 |
+
if input_scalar_ty.is_floating() and other_scalar_ty.is_int():
|
| 196 |
+
other = cast(other, input_scalar_ty, builder)
|
| 197 |
+
# int / float
|
| 198 |
+
elif input_scalar_ty.is_int() and other_scalar_ty.is_floating():
|
| 199 |
+
input = cast(input, other_scalar_ty, builder)
|
| 200 |
+
# int / int (cast to tl.float32)
|
| 201 |
+
elif input_scalar_ty.is_int() and other_scalar_ty.is_int():
|
| 202 |
+
input = cast(input, tl.float32, builder)
|
| 203 |
+
other = cast(other, tl.float32, builder)
|
| 204 |
+
# float / float (cast to the highest exponent type)
|
| 205 |
+
elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating():
|
| 206 |
+
if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width:
|
| 207 |
+
other = cast(other, input_scalar_ty, builder)
|
| 208 |
+
else:
|
| 209 |
+
input = cast(input, other_scalar_ty, builder)
|
| 210 |
+
# unreachable
|
| 211 |
+
else:
|
| 212 |
+
assert False
|
| 213 |
+
return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def floordiv(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 217 |
+
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
|
| 218 |
+
input_scalar_ty = input.type.scalar
|
| 219 |
+
other_scalar_ty = other.type.scalar
|
| 220 |
+
if input_scalar_ty.is_int() and other_scalar_ty.is_int():
|
| 221 |
+
ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty)
|
| 222 |
+
input = cast(input, ret_ty, builder)
|
| 223 |
+
other = cast(other, ret_ty, builder)
|
| 224 |
+
if ret_ty.is_int_signed():
|
| 225 |
+
return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type)
|
| 226 |
+
else:
|
| 227 |
+
return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type)
|
| 228 |
+
assert False
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def fdiv(input: tl.tensor, other: tl.tensor, ieee_rounding: bool, builder: ir.builder) -> tl.tensor:
|
| 232 |
+
input_scalar_ty = input.type.scalar
|
| 233 |
+
other_scalar_ty = other.type.scalar
|
| 234 |
+
if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating():
|
| 235 |
+
raise ValueError("both operands of fdiv must have floating scalar type")
|
| 236 |
+
input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True)
|
| 237 |
+
ret = builder.create_fdiv(input.handle, other.handle)
|
| 238 |
+
return tl.tensor(ret, input.type)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def mod(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 242 |
+
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
|
| 243 |
+
scalar_ty = input.type.scalar
|
| 244 |
+
other_scalar_ty = other.type.scalar
|
| 245 |
+
# float % float
|
| 246 |
+
if scalar_ty.is_floating():
|
| 247 |
+
# input - input.div(other, rounding_mode="floor") * other
|
| 248 |
+
ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), other, builder), builder)
|
| 249 |
+
return ret
|
| 250 |
+
# % int
|
| 251 |
+
elif scalar_ty.is_int():
|
| 252 |
+
if scalar_ty.int_signedness != other_scalar_ty.int_signedness:
|
| 253 |
+
raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " "
|
| 254 |
+
"because they have different signedness;"
|
| 255 |
+
"this is unlikely to result in a useful answer. Cast them to the same signedness.")
|
| 256 |
+
if scalar_ty.is_int_signed():
|
| 257 |
+
return tl.tensor(builder.create_srem(input.handle, other.handle), input.type)
|
| 258 |
+
else:
|
| 259 |
+
return tl.tensor(builder.create_urem(input.handle, other.handle), input.type)
|
| 260 |
+
assert False
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
##############
|
| 264 |
+
# bitwise ops
|
| 265 |
+
##############
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def bitwise_op_type_checking_impl(input: tl.tensor, other: tl.tensor,
|
| 269 |
+
builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]:
|
| 270 |
+
input, other = binary_op_type_checking_impl(input, other, builder, False, False, False)
|
| 271 |
+
input_sca_ty = input.type.scalar
|
| 272 |
+
other_sca_ty = other.type.scalar
|
| 273 |
+
if not input_sca_ty.is_int() or not other_sca_ty.is_int():
|
| 274 |
+
raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty)
|
| 275 |
+
ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty)
|
| 276 |
+
if ret_sca_ty != input_sca_ty:
|
| 277 |
+
input = cast(input, ret_sca_ty, builder)
|
| 278 |
+
if ret_sca_ty != other_sca_ty:
|
| 279 |
+
other = cast(other, ret_sca_ty, builder)
|
| 280 |
+
return input, other
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def and_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 284 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 285 |
+
return tl.tensor(builder.create_and(input.handle, other.handle), input.type)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def or_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 289 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 290 |
+
return tl.tensor(builder.create_or(input.handle, other.handle), input.type)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def xor_(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 294 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 295 |
+
return tl.tensor(builder.create_xor(input.handle, other.handle), input.type)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 299 |
+
if not input.type.is_int1():
|
| 300 |
+
input = bitcast(input, tl.dtype("int1"), builder)
|
| 301 |
+
if not other.type.is_int1():
|
| 302 |
+
other = bitcast(other, tl.dtype("int1"), builder)
|
| 303 |
+
return and_(input, other, builder)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 307 |
+
if not input.type.is_int1():
|
| 308 |
+
input = bitcast(input, tl.dtype("int1"), builder)
|
| 309 |
+
if not other.type.is_int1():
|
| 310 |
+
other = bitcast(other, tl.dtype("int1"), builder)
|
| 311 |
+
return or_(input, other, builder)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def not_(input: tl.tensor, builder: ir.builder):
|
| 315 |
+
if not input.type.is_int1():
|
| 316 |
+
input = bitcast(input, tl.dtype("int1"), builder)
|
| 317 |
+
return invert(input, builder)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 321 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 322 |
+
return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def ashr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 326 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 327 |
+
return tl.tensor(builder.create_ashr(input.handle, other.handle), input.type)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def shl(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 331 |
+
input, other = bitwise_op_type_checking_impl(input, other, builder)
|
| 332 |
+
return tl.tensor(builder.create_shl(input.handle, other.handle), input.type)
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
# ===----------------------------------------------------------------------===//
|
| 336 |
+
# Unary Operators
|
| 337 |
+
# ===----------------------------------------------------------------------===//
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def plus(input: tl.tensor) -> tl.tensor:
|
| 341 |
+
return input
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def minus(input: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 345 |
+
input_sca_ty = input.type.scalar
|
| 346 |
+
if input_sca_ty.is_ptr():
|
| 347 |
+
raise ValueError("wrong type argument to unary minus (" + input_sca_ty.__repr__() + ")")
|
| 348 |
+
_0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty)
|
| 349 |
+
return sub(_0, input, builder)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def invert(input: tl.tensor, builder: tl.tensor) -> tl.tensor:
|
| 353 |
+
input_sca_ty = input.type.scalar
|
| 354 |
+
if input_sca_ty.is_ptr() or input_sca_ty.is_floating():
|
| 355 |
+
raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")")
|
| 356 |
+
_1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty)
|
| 357 |
+
return xor_(input, _1, builder)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# ===----------------------------------------------------------------------===//
|
| 361 |
+
# Comparison Operators
|
| 362 |
+
# ===----------------------------------------------------------------------===//
|
| 363 |
+
def _bool_like(v: tl.tensor) -> tl.block_type:
|
| 364 |
+
if not v.type.is_block():
|
| 365 |
+
return tl.int1
|
| 366 |
+
shape = v.type.shape
|
| 367 |
+
return tl.block_type(tl.int1, shape)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def greater_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 371 |
+
input, other = binary_op_type_checking_impl(input, other, builder)
|
| 372 |
+
scalar_ty = input.type.scalar
|
| 373 |
+
# float > float
|
| 374 |
+
if scalar_ty.is_floating():
|
| 375 |
+
return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input))
|
| 376 |
+
# > int
|
| 377 |
+
elif scalar_ty.is_int():
|
| 378 |
+
if scalar_ty.is_int_signed():
|
| 379 |
+
return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input))
|
| 380 |
+
else:
|
| 381 |
+
return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input))
|
| 382 |
+
assert False
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def greater_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
|
| 386 |
+
input, other = binary_op_type_checking_impl(input, other, builder)
|
| 387 |
+
scalar_ty = input.type.scalar
|
| 388 |
+
# float >= float
|
| 389 |
+
if scalar_ty.is_floating():
|
| 390 |
+
return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input))
|
| 391 |
+
# >= int
|
| 392 |
+
elif scalar_ty.is_int():
|
| 393 |
+
if scalar_ty.is_int_signed():
|
| 394 |
+
return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input))
|
| 395 |
+
else:
|
| 396 |
+
return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input))
|
| 397 |
+
assert False
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def less_than(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Element-wise `input < other`; returns an int1 (boolean) tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    # float < float (ordered comparison)
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), ret_ty)
    # int < int: the signedness of the operands picks the predicate
    if scalar_ty.is_int():
        cmp = builder.create_icmpSLT if scalar_ty.is_int_signed() else builder.create_icmpULT
        return tl.tensor(cmp(input.handle, other.handle), ret_ty)
    assert False
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def less_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Element-wise `input <= other`; returns an int1 (boolean) tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    # float <= float (ordered comparison)
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), ret_ty)
    # int <= int: the signedness of the operands picks the predicate
    if scalar_ty.is_int():
        cmp = builder.create_icmpSLE if scalar_ty.is_int_signed() else builder.create_icmpULE
        return tl.tensor(cmp(input.handle, other.handle), ret_ty)
    assert False
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
def equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Element-wise `input == other`; returns an int1 (boolean) tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    # float == float: ordered-equal predicate (OEQ)
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), ret_ty)
    # int == int: signedness is irrelevant for equality
    if scalar_ty.is_int():
        return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), ret_ty)
    assert False
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
def not_equal(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Element-wise `input != other`; returns an int1 (boolean) tensor."""
    input, other = binary_op_type_checking_impl(input, other, builder)
    scalar_ty = input.type.scalar
    ret_ty = _bool_like(input)
    # float != float: unordered-not-equal predicate (UNE)
    if scalar_ty.is_floating():
        return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), ret_ty)
    # int != int: signedness is irrelevant for inequality
    if scalar_ty.is_int():
        return tl.tensor(builder.create_icmpNE(input.handle, other.handle), ret_ty)
    assert False
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
# ===----------------------------------------------------------------------===//
|
| 455 |
+
# Block Creation
|
| 456 |
+
# ===----------------------------------------------------------------------===//
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
def arange(start: int, end: int, builder: ir.builder) -> tl.tensor:
    """Build the 1-D int32 tensor [start, start + 1, ..., end - 1].

    Raises:
        ValueError: if the bounds are not plain ints, do not fit in 32 bits,
            or describe an empty/negative range.
    """
    if not isinstance(start, int) or not isinstance(end, int):
        raise ValueError("arange's arguments must be of type tl.constexpr")
    # NOTE(review): `>> 32` flags values >= 2**32 and any negative value; it is
    # not an exact signed-int32 range check — confirm this matches intent.
    is_start_int64 = bool(start >> 32)
    is_end_int64 = bool(end >> 32)
    if is_start_int64 or is_end_int64:
        raise ValueError("arange must fit in int32")
    if end <= start:
        raise ValueError("arange's end argument must be greater than the start argument")

    ret_ty = tl.block_type(tl.int32, [end - start])
    return tl.tensor(builder.create_make_range(start, end), ret_ty)
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
def full(shape: List[int], value, dtype: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Create a tensor of `shape` filled with `value`.

    `value` may be a size-1 tensor (cast to `dtype`) or a Python scalar
    (materialized as an IR constant of `dtype`).
    """
    if isinstance(value, tl.tensor):
        assert value.numel.value == 1, "only accepts size-1 tensor"
        value = cast(value, dtype, builder)
    else:
        # `value` is a plain scalar; a dtype is mandatory to build the constant.
        if dtype is None:
            raise ValueError("dtype must be specified when value is not a tensor")
        if value == 0:
            handle = builder.get_null_value(dtype.to_ir(builder))
        else:
            # Dispatch to the builder's per-dtype constant constructor.
            handle = getattr(builder, f"get_{dtype.name}")(value)
        value = tl.tensor(handle, dtype)

    return splat(value, shape, builder)
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
# ===----------------------------------------------------------------------===//
|
| 493 |
+
# Shape Manipulation
|
| 494 |
+
# ===----------------------------------------------------------------------===//
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
def splat(value: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor:
    """Replicate the scalar `value` into a block of `shape`.

    An empty shape returns `value` unchanged.
    """
    assert not value.type.is_block(), "Cannot splat a block tensor"
    if not shape:
        return value
    out_ty = tl.block_type(value.dtype, shape)
    return tl.tensor(builder.create_splat(value.handle, shape), out_ty)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def view(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor:
    """Reinterpret `input` with `dst_shape` (element count must match).

    The reshape is allowed to reorder elements (last argument True).
    """
    expected = 1
    for dim in dst_shape:
        expected *= dim
    if input.type.numel != expected:
        raise ValueError("cannot view block of different shape")
    out_ty = tl.block_type(input.type.scalar, dst_shape)
    return tl.tensor(builder.create_reshape(input.handle, dst_shape, True), out_ty)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
def reshape(input: tl.tensor, dst_shape: List[int], builder: ir.builder) -> tl.tensor:
    """Reshape `input` to `dst_shape`, preserving element order (unlike `view`)."""
    out_ty = tl.block_type(input.type.scalar, dst_shape)
    handle = builder.create_reshape(input.handle, dst_shape, False)
    return tl.tensor(handle, out_ty)
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
    """Insert a size-1 dimension into `input` at position `axis`."""
    new_shape = [tl._constexpr_to_value(s) for s in input.shape]
    new_shape.insert(axis, 1)

    # A scalar input simply becomes a size-1 block.
    if not input.type.is_block():
        return splat(input, shape=new_shape, builder=builder)

    out_ty = tl.block_type(input.type.scalar, new_shape)
    return tl.tensor(builder.create_expand_dims(input.handle, axis), out_ty)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor:
    """Concatenate two 1-D tensors; the current lowering may reorder elements,
    so `can_reorder` must be True."""
    assert can_reorder, "current implementation of `cat` always may reorder elements"
    assert len(lhs.shape) == 1
    total = lhs.shape[0] + rhs.shape[0]
    out_ty = tl.block_type(lhs.type.scalar, [total])
    return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), out_ty)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Transpose a 2-D tensor (swap its two dimensions)."""
    if len(input.shape) != 2:
        raise ValueError("Only 2D tensors can be transposed")
    rows, cols = input.shape[0], input.shape[1]
    out_ty = tl.block_type(input.type.scalar, [cols, rows])
    return tl.tensor(builder.create_trans(input.handle), out_ty)
|
| 543 |
+
|
| 544 |
+
|
| 545 |
+
def broadcast_impl_shape(input: tl.tensor, shape: List[int], builder: ir.builder) -> tl.tensor:
    """Broadcast `input` (scalar or block) to exactly `shape`.

    Ranks must match for block inputs, and only size-1 source dimensions may
    be expanded.
    """
    # A scalar is splatted straight to the target shape.
    if not input.type.is_block():
        out_ty = tl.block_type(input.type, shape)
        return tl.tensor(builder.create_splat(input.handle, shape), out_ty)

    src_shape = input.type.get_block_shapes()
    if len(src_shape) != len(shape):
        raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}")
    if shape == src_shape:
        return input
    for i, item in enumerate(src_shape):
        if shape[i] != item and item != 1:
            raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})"
                             f" must match the existing size ({item}) at non-singleton dimension"
                             f" {i}: {src_shape}, {shape}")
    out_ty = tl.block_type(input.type.scalar, shape)
    return tl.tensor(builder.create_broadcast(input.handle, shape), out_ty)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def broadcast_impl_value(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]:
    """Broadcast `lhs` and `rhs` against each other to a common shape.

    Handles (block, scalar), (scalar, block) and (block, block) pairs; for
    block/block, leading size-1 axes are prepended to the lower-rank operand
    and each dimension pair is then unified NumPy-style (a 1 stretches to the
    other size; equal sizes pass through).

    Returns:
        The pair (lhs, rhs), each broadcast as needed. Two scalars are
        returned unchanged. (Fixed return annotation: the original declared
        `-> tl.tensor` but the function returns a tuple.)

    Raises:
        ValueError: if two block dimensions are incompatible (neither equal
            nor 1).
    """
    lhs_ty = lhs.type
    rhs_ty = rhs.type

    # make_shape_compatible(block, scalar)
    if lhs_ty.is_block() and not rhs_ty.is_block():
        rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape)
        rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty)
    # make_shape_compatible(scalar, block)
    elif not lhs_ty.is_block() and rhs_ty.is_block():
        lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape)
        lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty)
    # make_shape_compatible(block, block)
    elif lhs_ty.is_block() and rhs_ty.is_block():
        lhs_shape = lhs_ty.get_block_shapes()
        rhs_shape = rhs_ty.get_block_shapes()

        if len(lhs_shape) < len(rhs_shape):
            # Add new axes to lhs (one size-1 axis prepended per missing rank).
            for dim in range(len(lhs_shape), len(rhs_shape)):
                lhs = tl.tensor(builder.create_expand_dims(lhs.handle, 0),
                                tl.block_type(lhs_ty.scalar, [1] + lhs_shape))
                lhs_ty = lhs.type
                lhs_shape = lhs_ty.get_block_shapes()
        elif len(rhs_shape) < len(lhs_shape):
            # Add new axes to rhs
            for dim in range(len(rhs_shape), len(lhs_shape)):
                rhs = tl.tensor(builder.create_expand_dims(rhs.handle, 0),
                                tl.block_type(rhs_ty.scalar, [1] + rhs_shape))
                rhs_ty = rhs.type
                rhs_shape = rhs_ty.get_block_shapes()
        assert len(rhs_shape) == len(lhs_shape)

        # Unify each dimension pair: 1 stretches, equal passes, else error.
        ret_shape = []
        for i, left in enumerate(lhs_shape):
            right = rhs_shape[i]
            if left == 1:
                ret_shape.append(right)
            elif right == 1:
                ret_shape.append(left)
            elif left == right:
                ret_shape.append(left)
            else:
                raise ValueError("Cannot make_shape_compatible: incompatible dimensions "
                                 "at index " + str(i) + ": " + str(left) + " and " + str(right))
        if lhs_shape != ret_shape:
            ret_ty = tl.block_type(lhs_ty.scalar, ret_shape)
            lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty)
        if rhs_shape != ret_shape:
            ret_ty = tl.block_type(rhs_ty.scalar, ret_shape)
            rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty)
    # (scalar, scalar) => returns original blocks
    return lhs, rhs
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
#######
|
| 619 |
+
# cast
|
| 620 |
+
#######
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
def bitcast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Reinterpret the bits of `input` as `dst_ty` without value conversion.

    Pointer source or destination types are delegated to `cast`. The source
    and destination scalar types must have the same primitive bit width.
    """
    src_ty = input.type
    # For a block input, the destination keeps the input's block shape.
    if src_ty.is_block():
        dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input
    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar
    if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr():
        return cast(input, dst_ty, builder)
    # A genuine bitcast needs both types to occupy the same number of bits.
    src_bits = src_sca_ty.primitive_bitwidth
    dst_bits = dst_sca_ty.primitive_bitwidth
    if src_bits != dst_bits:
        raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + " to "
                         "data-type of size " + str(dst_bits))
    return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty)
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
def cast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Value-converting cast of `input` to `dst_ty`.

    Dispatches on the (source, destination) scalar-type pair: fp8 conversions,
    float truncation/extension, int<->int resizing and signedness changes,
    float<->int, pointer<->int and pointer<->pointer casts. Casting to bool is
    implemented as `input != 0`. Asserts (rather than raises) on an
    unsupported pair.
    """
    src_ty = input.type
    # `dst_ty` may arrive wrapped in a tl.constexpr; unwrap it first.
    if isinstance(dst_ty, tl.constexpr):
        dst_ty = dst_ty.value
    # For block inputs, the destination adopts the input's block shape.
    if src_ty.is_block():
        dst_ty = tl.block_type(dst_ty.scalar, input.type.get_block_shapes())
    if src_ty == dst_ty:
        return input

    src_sca_ty = src_ty.scalar
    dst_sca_ty = dst_ty.scalar

    # fp8e4nv requires CUDA compute capability >= 8.9 (Ada/Hopper).
    if _is_cuda(builder.target) and builder.target.capability < 89 and \
       (src_sca_ty.is_fp8e4nv() or dst_sca_ty.is_fp8e4nv()):
        assert False, "fp8e4nv data type is not supported on CUDA arch < 89"

    # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64
    if (src_sca_ty.is_fp8() and dst_sca_ty.is_floating()) or \
       (src_sca_ty.is_floating() and dst_sca_ty.is_fp8()):
        return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # bf16 <=> (not fp32): route through fp32 as an intermediate.
    if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \
       (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()):
        return cast(cast(input, tl.float32, builder), dst_sca_ty, builder)

    # Standard floating types' casting: truncation
    # fp64 => fp32, fp16, bf16
    # fp32 => fp16, bf16
    truncate_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth
    if truncate_fp:
        return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # Standard floating types' casting: extension
    # fp32 => fp64
    # fp16 => fp32, fp64
    # bf16 => fp32, fp64
    ext_fp = src_sca_ty.is_floating() and \
        dst_sca_ty.is_floating() and \
        src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth
    if ext_fp:
        return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # Casting between integer types (width or signedness differs).
    if src_sca_ty.is_int() and dst_sca_ty.is_int() and \
       (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness):
        # bool sources never sign-extend (their value is 0 or 1).
        sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool()
        if dst_sca_ty.is_bool():
            # int -> bool is `input != 0`.
            ty = input.dtype.to_ir(builder)
            _0 = tl.tensor(builder.get_null_value(ty), input.dtype)
            return not_equal(input, _0, builder)
        else:
            return tl.tensor(builder.create_int_cast(input.handle, dst_ty.to_ir(builder), sign_extend), dst_ty)

    # Casting standard floating types to integer types
    if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int():
        if dst_sca_ty.is_bool():
            # float -> bool is `input != 0.0`.
            ty = input.dtype.to_ir(builder)
            _0 = tl.tensor(builder.get_null_value(ty), input.dtype)
            return not_equal(input, _0, builder)
        elif dst_sca_ty.is_int_signed():
            return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)), dst_ty)
        else:
            return tl.tensor(builder.create_fp_to_ui(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # Casting integer types to standard floating types
    if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating():
        if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed():
            return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty)
        else:
            return tl.tensor(builder.create_si_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # Casting pointer types to integer types
    if src_sca_ty.is_ptr() and dst_sca_ty.is_int():
        bitwidth = dst_sca_ty.int_bitwidth
        if bitwidth == 64:
            return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)), dst_ty)
        if bitwidth == 1:
            # ptr -> bool is a null-pointer test via int64.
            return not_equal(cast(input, tl.int64, builder), tl.tensor(builder.get_int64(0), tl.int64), builder)

    # Casting integer types to pointer types
    if src_sca_ty.is_int() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty)

    # Casting pointer types to pointer types
    if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
        return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty)

    assert False, f'cannot cast {input} to {dst_ty}'
|
| 733 |
+
|
| 734 |
+
|
| 735 |
+
# ===----------------------------------------------------------------------===//
|
| 736 |
+
# Memory Operators
|
| 737 |
+
# ===----------------------------------------------------------------------===//
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
def _str_to_load_cache_modifier(cache_modifier):
    """Map a load cache-modifier string (e.g. ".ca") to an ir.CACHE_MODIFIER.

    A falsy argument yields the default, CACHE_MODIFIER.NONE.
    """
    if not cache_modifier:
        return ir.CACHE_MODIFIER.NONE
    table = {
        ".ca": ir.CACHE_MODIFIER.CA,
        ".cg": ir.CACHE_MODIFIER.CG,
    }
    if cache_modifier not in table:
        raise ValueError(f"Cache modifier {cache_modifier} not supported")
    return table[cache_modifier]
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
def _str_to_store_cache_modifier(cache_modifier):
    """Map a store cache-modifier string (e.g. ".wb") to an ir.CACHE_MODIFIER.

    A falsy argument yields the default, CACHE_MODIFIER.NONE.
    """
    if not cache_modifier:
        return ir.CACHE_MODIFIER.NONE
    table = {
        ".wb": ir.CACHE_MODIFIER.WB,
        ".cg": ir.CACHE_MODIFIER.CG,
        ".cs": ir.CACHE_MODIFIER.CS,
        ".wt": ir.CACHE_MODIFIER.WT,
    }
    if cache_modifier not in table:
        raise ValueError(f"Cache modifier {cache_modifier} not supported")
    return table[cache_modifier]
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
def _str_to_eviction_policy(eviction_policy):
    """Map an eviction-policy string to an ir.EVICTION_POLICY.

    A falsy argument yields the default, EVICTION_POLICY.NORMAL.
    """
    if not eviction_policy:
        return ir.EVICTION_POLICY.NORMAL
    table = {
        "evict_last": ir.EVICTION_POLICY.EVICT_LAST,
        "evict_first": ir.EVICTION_POLICY.EVICT_FIRST,
    }
    if eviction_policy not in table:
        raise ValueError(f"Eviction policy {eviction_policy} not supported")
    return table[eviction_policy]
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
def _str_to_padding_option(padding_option):
    """Map a padding-option string ("zero"/"nan") to an ir.PADDING_OPTION.

    A falsy argument yields None (no padding).
    """
    if not padding_option:
        return None
    table = {
        "zero": ir.PADDING_OPTION.PAD_ZERO,
        "nan": ir.PADDING_OPTION.PAD_NAN,
    }
    if padding_option not in table:
        raise ValueError(f"Padding option {padding_option} not supported")
    return table[padding_option]
|
| 790 |
+
|
| 791 |
+
|
| 792 |
+
def _str_to_sem(sem_option):
    """Map a memory-semantic string to an ir.MEM_SEMANTIC.

    A falsy argument yields the default, acquire-release.
    """
    if not sem_option:
        return ir.MEM_SEMANTIC.ACQUIRE_RELEASE
    table = {
        "acquire": ir.MEM_SEMANTIC.ACQUIRE,
        "release": ir.MEM_SEMANTIC.RELEASE,
        "acq_rel": ir.MEM_SEMANTIC.ACQUIRE_RELEASE,
        "relaxed": ir.MEM_SEMANTIC.RELAXED,
    }
    if sem_option not in table:
        raise ValueError(f"Memory semantic {sem_option} not supported")
    return table[sem_option]
|
| 806 |
+
|
| 807 |
+
|
| 808 |
+
def _str_to_scope(scope_option):
    """Map a memory-sync-scope string ("gpu"/"cta"/"sys") to an ir.MEM_SYNC_SCOPE.

    A falsy argument yields the default, MEM_SYNC_SCOPE.GPU.

    Raises:
        ValueError: for an unrecognized scope string.
    """
    scope = ir.MEM_SYNC_SCOPE.GPU
    if scope_option:
        if scope_option == "gpu":
            scope = ir.MEM_SYNC_SCOPE.GPU
        elif scope_option == "cta":
            scope = ir.MEM_SYNC_SCOPE.CTA
        elif scope_option == "sys":
            scope = ir.MEM_SYNC_SCOPE.SYSTEM
        else:
            # Fixed copy-paste in the message: this function validates memory
            # *scopes*, not memory semantics.
            raise ValueError(f"Memory scope {scope_option} not supported")
    return scope
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
def _canonicalize_boundary_check(boundary_check, block_shape):
    """Normalize `boundary_check` into a sorted list of distinct dimension indices.

    Accepts a single value or an iterable, possibly containing tl.constexpr
    wrappers; each entry must be a valid axis of `block_shape`. A falsy
    argument yields an empty tuple (no boundary checking).
    """
    if not boundary_check:
        return tuple()
    if not hasattr(boundary_check, "__iter__"):
        boundary_check = [boundary_check]
    dims = [d.value if isinstance(d, tl.constexpr) else d for d in boundary_check]
    for dim in dims:
        assert isinstance(dim, int) and 0 <= dim < len(block_shape)
    assert len(dims) > 0
    assert len(dims) == len(set(dims)), "Duplicate dimension in `boundary_check`"
    return sorted(dims)
|
| 833 |
+
|
| 834 |
+
|
| 835 |
+
def _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder):
    """Load through a block pointer (`pointer_type<block_type<>>`).

    Boundary handling comes from `boundary_check`/`padding`, so `mask` and
    `other` must not be supplied.
    """
    if mask or other:
        raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers")

    elt_ty = ptr.type.element_ty.element_ty
    assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`"
    # NaN padding is only meaningful for floating-point element types.
    if elt_ty.is_int() and padding == ir.PADDING_OPTION.PAD_NAN:
        raise ValueError("Padding option `nan` is not supported for integer block pointers")

    # The result type is the de-referenced (block) type of the pointer.
    dst_ty = ptr.type.element_ty

    # Validate and normalize `boundary_check` against the block's shape.
    boundary_check = _canonicalize_boundary_check(boundary_check, dst_ty.get_block_shapes())

    handle = builder.create_tensor_pointer_load(ptr.handle, boundary_check, padding, cache, eviction, is_volatile)
    return tl.tensor(handle, dst_ty)
|
| 855 |
+
|
| 856 |
+
|
| 857 |
+
def _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder):
    """Load via a tensor of pointers or a scalar pointer.

    Handles `block_type<pointer_type<>>` and plain `pointer_type<>` sources;
    `boundary_check`/`padding` are rejected here (those belong to block
    pointers). `mask` and `other` are broadcast to the pointer shape, and
    `pointer_type<tl.int1>` is loaded as `tl.int8`.
    """
    # Load by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
    if not ptr.type.scalar.is_ptr():
        raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.load`")

    # Check `mask`, `other`, `boundary_check`, and `padding` arguments
    if not mask and other:
        raise ValueError("`other` cannot be provided without `mask`")
    if padding or boundary_check:
        raise ValueError("`padding_option` or `boundary_check` argument is not supported for loading a tensor of"
                         "pointers or loading a scalar. Because the compiler does not know the boundary; please "
                         "use block pointers (defined by `make_block_ptr`) instead")

    # For a pointer of scalar, check the type of `mask` and `other`
    if not ptr.type.is_block():
        if mask and mask.type.is_block():
            raise ValueError("Mask argument cannot be block type if pointer argument is not a block")
        if other and other.type.is_block():
            raise ValueError("Other argument cannot be block type if pointer argument is not a block")

    # Make `mask` and `other` into the same shape as `ptr`
    if ptr.type.is_block():
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
        if other:
            other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder)

    # Get `pointer_type<elt_ty>` and `elt_ty`
    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty

    # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>`
    # (int1 has no addressable storage format; int8 is its memory representation here)
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)

    # Cast `other` into `ele_ty` type
    if other:
        other = cast(other, elt_ty, builder)

    # Create loaded result type `dst_ty`
    if ptr.type.is_block():
        shape = ptr.type.get_block_shapes()
        dst_ty = tl.block_type(elt_ty, shape)
    else:
        # Load by de-referencing the pointer of scalar
        dst_ty = elt_ty

    # Build IR: masked and unmasked loads use different builder ops.
    if not mask:
        return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty)
    else:
        return tl.tensor(
            builder.create_masked_load(ptr.handle, mask.handle, other.handle if other else None, cache, eviction,
                                       is_volatile), dst_ty)
|
| 913 |
+
|
| 914 |
+
|
| 915 |
+
def load(ptr: tl.tensor, mask: Optional[tl.tensor], other: Optional[tl.tensor], boundary_check, padding_option: str,
         cache_modifier: str, eviction_policy: str, is_volatile: bool, builder: ir.builder) -> tl.tensor:
    """Entry point for `tl.load`: parse the string options, then dispatch to
    the block-pointer or legacy (tensor-of-pointers / scalar-pointer) path."""
    # Cache, eviction and padding options
    cache = _str_to_load_cache_modifier(cache_modifier)
    eviction = _str_to_eviction_policy(eviction_policy)
    padding = _str_to_padding_option(padding_option)

    # A block pointer is `pointer_type<block_type<>>`.
    is_block_ptr = ptr.type.is_ptr() and ptr.type.element_ty.is_block()
    if is_block_ptr:
        return _load_block_pointer(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder)
    # Otherwise: `block_type<pointer_type<>>` or `pointer_type<>`.
    return _load_legacy(ptr, mask, other, boundary_check, padding, cache, eviction, is_volatile, builder)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
def _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder):
    """Store through a block pointer (`pointer_type<block_type<>>`).

    Boundary handling comes from `boundary_check`, so `mask` must not be
    supplied. `val` is broadcast from scalar if needed and must match the
    block's shape and element type exactly.
    """
    if mask:
        raise ValueError("`mask` and `other` arguments cannot be specified for loading block pointers")

    # Broadcast a scalar value up to the block shape, then require agreement.
    block_shape = ptr.type.element_ty.get_block_shapes()
    if not val.type.is_block():
        val = broadcast_impl_shape(val, block_shape, builder)
    assert val.type.is_block(), "Value argument must be block type or a scalar"
    assert block_shape == val.type.get_block_shapes(
    ), f"Block shape({block_shape}) and value shape({val.type.get_block_shapes()}) mismatch"
    assert ptr.type.element_ty.element_ty == val.type.element_ty, f"Block element type({ptr.type.element_ty.element_ty}) and value element type({val.type.element_ty}) mismatch"

    elt_ty = ptr.type.element_ty.element_ty
    assert elt_ty != tl.int1, "`tl.int1` should be rewrited in `tl.make_block_ptr`"

    # Validate and normalize `boundary_check` against the block's shape.
    boundary_check = _canonicalize_boundary_check(boundary_check, block_shape)

    handle = builder.create_tensor_pointer_store(ptr.handle, val.handle, boundary_check, cache, eviction)
    return tl.tensor(handle, tl.void)
|
| 954 |
+
|
| 955 |
+
|
| 956 |
+
def _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder):
    """Store via a tensor of pointers or a scalar pointer.

    Handles `block_type<pointer_type<>>` and plain `pointer_type<>` targets;
    `boundary_check` is rejected here (it belongs to block pointers). `val`
    and `mask` are broadcast to the pointer shape, `val` is cast to the
    pointee type, and `pointer_type<tl.int1>` is stored as `tl.int8`.
    """
    # Store by a tensor of pointers or a pointer of scalar: `block_type<pointer_type<>>` or `pointer_type<>`
    if not ptr.type.scalar.is_ptr():
        raise ValueError(f"Unsupported ptr type {ptr.type.__repr__()} in `tl.store`")

    # Check `boundary_check` argument
    if boundary_check:
        raise ValueError("`boundary_check` argument is not supported for storing a tensor of pointers or storing a "
                         "scalar. Because the compiler does not know the boundary; please use block pointers "
                         "(defined by `make_block_ptr`) instead")

    # For a pointer of scalar, check the type of `val` and `mask`
    if not ptr.type.is_block():
        if val.type.is_block():
            raise ValueError("Value argument cannot be block type if pointer argument is not a block")
        if mask and mask.type.is_block():
            raise ValueError("Mask argument cannot be block type if pointer argument is not a block")

    # Make `mask` and `val` into the same shape as `ptr`
    if ptr.type.is_block():
        val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)

    ptr_ty = ptr.type.scalar
    elt_ty = ptr_ty.element_ty

    # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>`
    # (int1 has no addressable storage format; int8 is its memory representation here)
    if elt_ty == tl.int1:
        elt_ty = tl.int8
        ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
        ptr = cast(ptr, ptr_ty, builder)

    # Cast to target data type
    val = cast(val, elt_ty, builder)

    # Build IR: masked and unmasked stores use different builder ops.
    if not mask:
        return tl.tensor(builder.create_store(ptr.handle, val.handle, cache, eviction), tl.void)
    if not mask.type.scalar.is_bool():
        raise ValueError("Mask must have boolean scalar type")
    return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, cache, eviction), tl.void)
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
def store(ptr: tl.tensor, val: tl.tensor, mask: Optional[tl.tensor], boundary_check, cache_modifier: str,
          eviction_policy: str, builder: ir.builder) -> tl.tensor:
    """Entry point for `tl.store`: parse the string options, then dispatch to
    the block-pointer or legacy (tensor-of-pointers / scalar-pointer) path."""
    # Cache and eviction options
    cache = _str_to_store_cache_modifier(cache_modifier)
    eviction = _str_to_eviction_policy(eviction_policy)

    # A block pointer is `pointer_type<block_type<>>`.
    is_block_ptr = ptr.type.is_ptr() and ptr.type.element_ty.is_block()
    if is_block_ptr:
        return _store_block_pointer(ptr, val, mask, boundary_check, cache, eviction, builder)
    # Otherwise: `block_type<pointer_type<>>` or `pointer_type<>`.
    return _store_legacy(ptr, val, mask, boundary_check, cache, eviction, builder)
|
| 1012 |
+
|
| 1013 |
+
|
| 1014 |
+
#########
|
| 1015 |
+
# atomic
|
| 1016 |
+
#########
|
| 1017 |
+
|
| 1018 |
+
|
| 1019 |
+
def atomic_cas(ptr: tl.tensor, cmp: tl.tensor, val: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Atomic compare-and-swap at `ptr` with comparand `cmp` and new value `val`.

    Only 16/32/64-bit element types are supported; the result has `val`'s type.
    """
    sem = _str_to_sem(sem)
    scope = _str_to_scope(scope)
    element_ty = ptr.type.scalar.element_ty
    if element_ty.primitive_bitwidth not in [16, 32, 64]:
        raise ValueError("atomic_cas only supports elements with width {16, 32, 64}")
    handle = builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle, sem, scope)
    return tl.tensor(handle, val.type)
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
def atom_red_typechecking_impl(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, op: str,
                               builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]:
    """Validate and normalize the (ptr, val, mask) arguments of an atomic RMW.

    Broadcasts `val` and `mask` to the pointer's block shape, casts `val` to
    the pointee type, and synthesizes an all-true mask when none is given.

    Raises:
        ValueError: if `ptr` is not a pointer type, or the element type is not
            supported by `atomic_<op>`.
    """
    if not ptr.type.scalar.is_ptr():
        raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__())
    element_ty = ptr.type.scalar.element_ty
    # fp16 is only allowed for atomic add.
    if element_ty is tl.float16 and op != 'add':
        raise ValueError("atomic_" + op + " does not support fp16")
    # Narrow integer types and bf16 are not supported by any atomic op here.
    if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]:
        raise ValueError("atomic_" + op + " does not support " + str(element_ty))
    if ptr.type.is_block():
        if mask:
            mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
        if val:
            val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
    val = cast(val, ptr.type.scalar.element_ty, builder)
    if not mask:
        # No user-provided mask: build an all-true mask matching ptr's shape.
        mask_ir = builder.get_int1(True)
        mask_ty = tl.int1
        if ptr.type.is_block():
            mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes())
            mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes())
        mask = tl.tensor(mask_ir, mask_ty)
    return ptr, val, mask
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
def atomic_max(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic max RMW at ``ptr`` with operand ``val`` under ``mask``.

    Integers map directly to MAX (signed) / UMAX (unsigned). fp32/fp64 are
    emulated on their bit patterns: for non-negative floats a signed integer
    max preserves ordering, for negative floats an unsigned integer min does;
    the two partial results are merged with a select on the sign of ``val``.

    :raises TypeError: for float dtypes other than fp32/fp64.
    """
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder)
    sem = _str_to_sem(sem)
    scope = _str_to_scope(scope)
    sca_ty = val.type.scalar
    # direct call to atomic_max for integers
    if sca_ty.is_int():
        if sca_ty.is_int_signed():
            return tl.tensor(
                builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type)
        else:
            return tl.tensor(
                builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, ptr.handle, val.handle, mask.handle, sem, scope), val.type)
    # for float
    # return atomic_smax(i_ptr, i_val) if val >= 0
    # return atomic_umin(i_ptr, i_val) if val < 0
    if sca_ty not in {tl.float32, tl.float64}:
        raise TypeError(f"atomic_max not supported for dtype {sca_ty}")

    # BUGFIX: fp64 must be reinterpreted as int64 (was tl.float64, which made
    # the bitcast a no-op and ran the integer RMW on a float-typed value).
    itype = tl.int32 if sca_ty == tl.float32 else tl.int64
    zero = full([], 0.0, sca_ty, builder)

    i_val = bitcast(val, itype, builder)
    i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder)
    pos = greater_equal(val, zero, builder)
    neg = less_than(val, zero, builder)
    # each RMW is masked so it only applies to lanes with the matching sign
    pos_ret = tl.tensor(
        builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle,
                                  and_(mask, pos, builder).handle, sem, scope), i_val.type)
    neg_ret = tl.tensor(
        builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle,
                                  and_(mask, neg, builder).handle, sem, scope), i_val.type)
    ret = where(pos, pos_ret, neg_ret, builder)
    return bitcast(ret, sca_ty, builder)
|
| 1087 |
+
|
| 1088 |
+
|
| 1089 |
+
def atomic_min(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic min RMW at ``ptr`` with operand ``val`` under ``mask``.

    Integers map directly to MIN (signed) / UMIN (unsigned). fp32/fp64 are
    emulated on their bit patterns: for non-negative floats a signed integer
    min preserves ordering, for negative floats an unsigned integer max does;
    the two partial results are merged with a select on the sign of ``val``.

    :raises TypeError: for float dtypes other than fp32/fp64.
    """
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder)
    sem = _str_to_sem(sem)
    scope = _str_to_scope(scope)
    sca_ty = val.type.scalar
    # direct call to atomic_min for integers
    if sca_ty.is_int():
        if sca_ty.is_int_signed():
            return tl.tensor(
                builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type)
        else:
            return tl.tensor(
                builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, ptr.handle, val.handle, mask.handle, sem, scope), val.type)
    # for float
    # return atomic_smin(i_ptr, i_val) if val >= 0
    # return atomic_umax(i_ptr, i_val) if val < 0
    if sca_ty not in {tl.float32, tl.float64}:
        raise TypeError(f"atomic_min not supported for dtype {sca_ty}")

    # BUGFIX: fp64 must be reinterpreted as int64 (was tl.float64, which made
    # the bitcast a no-op and ran the integer RMW on a float-typed value).
    itype = tl.int32 if sca_ty == tl.float32 else tl.int64
    zero = full([], 0.0, sca_ty, builder)

    i_val = bitcast(val, itype, builder)
    i_ptr = bitcast(ptr, tl.pointer_type(itype, 1), builder)
    pos = greater_equal(val, zero, builder)
    neg = less_than(val, zero, builder)
    # each RMW is masked so it only applies to lanes with the matching sign
    pos_ret = tl.tensor(
        builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle,
                                  and_(mask, pos, builder).handle, sem, scope), i_val.type)
    neg_ret = tl.tensor(
        builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX, i_ptr.handle, i_val.handle,
                                  and_(mask, neg, builder).handle, sem, scope), i_val.type)
    ret = where(pos, pos_ret, neg_ret, builder)
    return bitcast(ret, sca_ty, builder)
|
| 1123 |
+
|
| 1124 |
+
|
| 1125 |
+
def atomic_add(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic add RMW at ``ptr``; floats use FADD, integers use ADD."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder)
    rmw_op = ir.ATOMIC_OP.FADD if val.type.scalar.is_floating() else ir.ATOMIC_OP.ADD
    handle = builder.create_atomic_rmw(rmw_op, ptr.handle, val.handle, mask.handle,
                                       _str_to_sem(sem), _str_to_scope(scope))
    return tl.tensor(handle, val.type)
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def atomic_and(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic bitwise-AND RMW at ``ptr`` with ``val`` under ``mask``."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder)
    handle = builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle,
                                       _str_to_sem(sem), _str_to_scope(scope))
    return tl.tensor(handle, val.type)
|
| 1140 |
+
|
| 1141 |
+
|
| 1142 |
+
def atomic_or(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic bitwise-OR RMW at ``ptr`` with ``val`` under ``mask``."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder)
    handle = builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle,
                                       _str_to_sem(sem), _str_to_scope(scope))
    return tl.tensor(handle, val.type)
|
| 1148 |
+
|
| 1149 |
+
|
| 1150 |
+
def atomic_xor(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str, builder: ir.builder) -> tl.tensor:
    """Emit an atomic bitwise-XOR RMW at ``ptr`` with ``val`` under ``mask``."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder)
    handle = builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle,
                                       _str_to_sem(sem), _str_to_scope(scope))
    return tl.tensor(handle, val.type)
|
| 1156 |
+
|
| 1157 |
+
|
| 1158 |
+
def atomic_xchg(ptr: tl.tensor, val: tl.tensor, mask: tl.tensor, sem: str, scope: str,
                builder: ir.builder) -> tl.tensor:
    """Emit an atomic exchange at ``ptr``: store ``val``, yield the old value."""
    ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder)
    handle = builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle,
                                       _str_to_sem(sem), _str_to_scope(scope))
    return tl.tensor(handle, val.type)
|
| 1165 |
+
|
| 1166 |
+
|
| 1167 |
+
# ===----------------------------------------------------------------------===//
|
| 1168 |
+
# Linear Algebra
|
| 1169 |
+
# ===----------------------------------------------------------------------===//
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
def gpu_has_mfma() -> bool:
    """Whether the target GPU has MFMA units (HIP only).

    Currently assumed True for any HIP target; mfma supported in
    ['gfx908', 'gfx90a'].
    """
    return bool(is_hip())
|
| 1176 |
+
|
| 1177 |
+
|
| 1178 |
+
def mfma_supported(M, N, K, allow_tf32, ret_scalar_ty) -> bool:
    """Whether MFMA can be used for a dot of this configuration.

    Only gates on hardware availability for now; the shape/type parameters
    are accepted but not yet consulted.
    """
    # TODO: Add check for configurations and types.
    return gpu_has_mfma()
|
| 1183 |
+
|
| 1184 |
+
|
| 1185 |
+
def dot(lhs: tl.tensor, rhs: tl.tensor, acc: tl.tensor, allow_tf32: bool, max_num_imprecise_acc: int,
        out_dtype: tl.dtype, builder: ir.builder) -> tl.tensor:
    """Build a matrix-multiply (`tt.dot`) of two 2D blocks.

    Validates operand dtypes against the target architecture, picks the
    accumulator/result scalar type, applies HIP-specific fallbacks, and emits
    the dot with either `acc` or a zero splat as the initial accumulator.
    """

    def assert_dtypes_valid(lhs_dtype, rhs_dtype, target):
        # Checks for non-cuda archs
        if not _is_cuda(target):
            assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!"
            return
        # Checks for cuda arch
        if target.capability < 90:
            assert not lhs_dtype.is_fp8e4nv() and not rhs_dtype.is_fp8e4nv(
            ), "Dot op does not support fp8e4nv on CUDA arch < 90"
            # mixed fp8 flavors are allowed below sm_90
            if lhs_dtype.is_fp8() and rhs_dtype.is_fp8():
                return
            assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!"
        else:
            assert not lhs_dtype.is_fp8e4b15() and not rhs_dtype.is_fp8e4b15(
            ), "Dot op does not support fp8e4b15 on CUDA arch >= 90"
            assert not lhs_dtype.is_fp8e4b15x4() and not rhs_dtype.is_fp8e4b15x4(
            ), "Dot op does not support fp8e4b15x4 on CUDA arch >= 90"
            if lhs_dtype.is_int() or rhs_dtype.is_int():
                assert lhs_dtype == rhs_dtype, f"Both operands must be same type. First operand ({lhs_dtype}) and second operand ({rhs_dtype})"
                assert lhs_dtype.is_int8() or lhs_dtype.is_uint8(
                ), f"Both operands must be either int8 or uint8. Operand type ({lhs_dtype})"
            elif lhs_dtype.is_fp8() or rhs_dtype.is_fp8():
                assert lhs_dtype.is_fp8e4nv() or lhs_dtype.is_fp8e5(
                ), f"Only supports fp8e4nv or fp8e5. First operand ({lhs_dtype})"
                assert rhs_dtype.is_fp8e4nv() or rhs_dtype.is_fp8e5(
                ), f"Only supports fp8e4nv or fp8e5. Second operand ({rhs_dtype})"
            else:
                assert lhs_dtype.is_fp16() or lhs_dtype.is_bf16() or lhs_dtype.is_fp32() or lhs_dtype.is_int1(
                ), f"Unsupported dtype {lhs_dtype}"
                assert rhs_dtype.is_fp16() or rhs_dtype.is_bf16() or rhs_dtype.is_fp32() or rhs_dtype.is_int1(
                ), f"Unsupported dtype {rhs_dtype}"
                assert lhs_dtype == rhs_dtype, f"First input ({lhs_dtype}) and second input ({rhs_dtype}) must have the same dtype!"

    assert lhs.type.is_block() and rhs.type.is_block()

    assert_dtypes_valid(lhs.dtype, rhs.dtype, builder.target)

    # shape compatibility: (M, K) x (K, N), all dims >= 16
    assert len(lhs.shape) == 2, f"First input shape ({lhs.shape}) is not two dimensional!"
    assert len(rhs.shape) == 2, f"Second input shape ({rhs.shape}) is not two dimensional!"
    assert lhs.shape[1].value == rhs.shape[
        0].value, f"First input shape ({lhs.shape}) and second input shape {rhs.shape} are not compatible for matmul (second index of first shape ({lhs.shape[1].value}) must be equal to first index of second shape ({rhs.shape[0].value})"
    assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \
        and rhs.shape[1].value >= 16, \
        f"All values in both first input shape ({lhs.shape}) and second input shape ({rhs.shape}) must be >= 16!"
    # pick accumulator zero (_0) and result scalar type per operand dtype
    if lhs.type.scalar.is_int():
        assert lhs.type.scalar == tl.int8, "only int8 supported!"
        # TODO: This is CUDA specific, check if ROCm has the same limitation
        assert lhs.shape[1].value >= 32, "small blocks not supported!"
        _0 = builder.get_int32(0)
        ret_scalar_ty = tl.int32
    elif out_dtype.is_bf16():
        raise ValueError(
            "out_dtype=bfloat16 is unsupported. Please use out_dtype=float32/float16 and cast with `.to(tl.bfloat16)`")
    elif lhs.type.scalar.is_fp32() or lhs.type.scalar.is_bf16():
        _0 = builder.get_fp32(0)
        ret_scalar_ty = tl.float32
    else:
        _0 = builder.get_fp16(0) if out_dtype.is_fp16() else builder.get_fp32(0)
        ret_scalar_ty = out_dtype

    M = lhs.type.shape[0]
    N = rhs.type.shape[1]

    # Cast operands of types f16 and i8 for configurations where FMA only supported.
    if is_hip() and not mfma_supported(M, N, lhs.type.shape[1], allow_tf32, ret_scalar_ty):
        ret_cast_scalar_ty = tl.float32 if lhs.type.scalar.is_int() else ret_scalar_ty
        lhs = cast(lhs, ret_cast_scalar_ty, builder)
        rhs = cast(rhs, ret_cast_scalar_ty, builder)
        if ret_cast_scalar_ty == tl.float16:
            _0 = builder.create_splat(builder.get_fp16(0), [M, N])
        else:
            _0 = builder.create_splat(builder.get_fp32(0), [M, N])
        ret_ty = tl.block_type(ret_cast_scalar_ty, [M, N])
        ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty)
        return cast(ret, ret_scalar_ty, builder)
    # HIP MFMA path: accumulate in 32-bit then narrow back to ret_scalar_ty
    if is_hip() and mfma_supported(M, N, lhs.type.shape[1], allow_tf32,
                                   ret_scalar_ty) and ret_scalar_ty.primitive_bitwidth < 32:
        if lhs.type.scalar.is_int():
            ret_dot_scalar_ty = tl.int32
            _0 = builder.create_splat(builder.get_int32(0), [M, N])
        else:
            ret_dot_scalar_ty = tl.float32
            _0 = builder.create_splat(builder.get_fp32(0), [M, N])
        ret_ty = tl.block_type(ret_dot_scalar_ty, [M, N])
        ret = tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), ret_ty)
        return cast(ret, ret_scalar_ty, builder)
    ret_ty = tl.block_type(ret_scalar_ty, [M, N])
    if acc is None:
        acc_handle = builder.create_splat(_0, [M, N])
    else:
        acc_handle = acc.handle
        assert acc.type == ret_ty

    # max_num_imprecise_acc only applies to fp8 -> fp32 dot on sm_90
    if not (_is_cuda(builder.target) and builder.target.capability == 90 and lhs.dtype.is_fp8() and rhs.dtype.is_fp8()
            and ret_scalar_ty.is_fp32()):
        max_num_imprecise_acc = 0
    if max_num_imprecise_acc is None:
        max_num_imprecise_acc = 2**30

    return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, acc_handle, allow_tf32, max_num_imprecise_acc), ret_ty)
|
| 1289 |
+
|
| 1290 |
+
|
| 1291 |
+
# ===----------------------------------------------------------------------===//
|
| 1292 |
+
# Indexing
|
| 1293 |
+
# ===----------------------------------------------------------------------===//
|
| 1294 |
+
|
| 1295 |
+
|
| 1296 |
+
def where(condition: tl.tensor, x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise select: yields ``x`` where ``condition`` holds, else ``y``."""
    condition = cast(condition, tl.int1, builder)
    if condition.type.is_block():
        condition, x = broadcast_impl_value(condition, x, builder)
        x, y = broadcast_impl_value(x, y, builder)
        # broadcasting against y may have reshaped x; re-align the condition
        condition, x = broadcast_impl_value(condition, x, builder)

    x, y = binary_op_type_checking_impl(x, y, builder, True, True)
    if not condition.type.is_block():
        condition, _ = broadcast_impl_value(condition, x, builder)
    return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), x.type)
|
| 1308 |
+
|
| 1309 |
+
|
| 1310 |
+
# ===----------------------------------------------------------------------===//
|
| 1311 |
+
# Reduction
|
| 1312 |
+
# ===----------------------------------------------------------------------===
|
| 1313 |
+
|
| 1314 |
+
|
| 1315 |
+
def reduction(inputs: Sequence[tl.tensor], axis: int, region_builder_fn, builder: ir.builder) -> Tuple[tl.tensor, ...]:
    """Build a reduction over ``axis`` for one or more same-shaped inputs.

    ``axis=None`` flattens each input first and reduces the whole tensor.
    ``region_builder_fn`` populates the combine region of the reduce op.
    """
    if axis is None:
        # flatten every input to 1D, then reduce along that single axis
        inputs = tuple(view(t, [t.numel.value], builder) for t in inputs)
        axis = 0
    shape = inputs[0].type.shape
    for t in inputs:
        assert t.type.shape == shape
    result_shape = [s for i, s in enumerate(shape) if i != axis]

    def wrap_tensor(handle, scalar_ty):
        # a 0-d result collapses to a plain scalar type
        res_ty = tl.block_type(scalar_ty, result_shape) if result_shape else scalar_ty
        return tl.tensor(handle, res_ty)

    reduce_op = builder.create_reduce([t.handle for t in inputs], axis)
    region_builder_fn(reduce_op)
    reduce_op.verify()

    return tuple(wrap_tensor(reduce_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs)))
|
| 1342 |
+
|
| 1343 |
+
|
| 1344 |
+
# ===----------------------------------------------------------------------===
|
| 1345 |
+
# Associative Scan
|
| 1346 |
+
# ===----------------------------------------------------------------------===
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
def associative_scan(inputs: Sequence[tl.tensor], axis: int, region_builder_fn,
                     builder: ir.builder) -> Tuple[tl.tensor, ...]:
    """Build an associative scan along ``axis``; output shape matches input.

    ``region_builder_fn`` populates the combine region of the scan op.
    """
    if len(inputs) != 1:
        raise ValueError("Current implementation only support single tensor input")
    shape = inputs[0].type.shape

    def _wrap(handle, scalar_ty):
        return tl.tensor(handle, tl.block_type(scalar_ty, shape))

    scan_op = builder.create_scan([t.handle for t in inputs], axis)
    region_builder_fn(scan_op)
    scan_op.verify()

    return tuple(_wrap(scan_op.get_result(i), inputs[i].type.scalar) for i in range(len(inputs)))
|
| 1364 |
+
|
| 1365 |
+
|
| 1366 |
+
# ===----------------------------------------------------------------------===
|
| 1367 |
+
# Math
|
| 1368 |
+
# ===----------------------------------------------------------------------===
|
| 1369 |
+
|
| 1370 |
+
|
| 1371 |
+
def _check_dtype(dtypes: List[str]) -> T:
    """
    Decorator factory that restricts tensor arguments to the given dtypes.

    We follow libdevice's convention for accepted math-function dtypes:
    accelerators/GPUs often lack float16/bfloat16 math ops, so instead of
    accepting everything we force users to cast explicitly to a supported
    data type.
    """

    def wrapper(fn):

        @wraps(fn)
        def check(*args, **kwargs):
            # validate positional and keyword tensor arguments alike
            for arg in list(args) + list(kwargs.values()):
                if isinstance(arg, tl.tensor) and arg.type.scalar.name not in dtypes:
                    raise ValueError(f"Expected dtype {dtypes} but got {arg.type.scalar.name}")
            return fn(*args, **kwargs)

        return check

    return wrapper
|
| 1394 |
+
|
| 1395 |
+
|
| 1396 |
+
def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor:
    """High bits of the integer product of ``x`` and ``y`` (via math.mulhi)."""
    lhs, rhs = binary_op_type_checking_impl(x, y, builder)
    # FIXME(Keren): not portable, should be fixed
    from . import math
    return math.mulhi(lhs, rhs, _builder=builder)
|
| 1401 |
+
|
| 1402 |
+
|
| 1403 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise floor of ``x``, delegated to the math module."""
    # FIXME(Keren): not portable, should be fixed
    from . import math
    return math.floor(x, _builder=builder)
|
| 1408 |
+
|
| 1409 |
+
|
| 1410 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise natural exponential of ``x``."""
    handle = builder.create_exp(x.handle)
    return tl.tensor(handle, x.type)
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def log(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise natural logarithm of ``x``."""
    handle = builder.create_log(x.handle)
    return tl.tensor(handle, x.type)
|
| 1418 |
+
|
| 1419 |
+
|
| 1420 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise cosine of ``x``."""
    handle = builder.create_cos(x.handle)
    return tl.tensor(handle, x.type)
|
| 1423 |
+
|
| 1424 |
+
|
| 1425 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise sine of ``x``."""
    handle = builder.create_sin(x.handle)
    return tl.tensor(handle, x.type)
|
| 1428 |
+
|
| 1429 |
+
|
| 1430 |
+
@_check_dtype(dtypes=["fp32", "fp64"])
def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise square root of ``x``."""
    handle = builder.create_sqrt(x.handle)
    return tl.tensor(handle, x.type)
|
| 1433 |
+
|
| 1434 |
+
|
| 1435 |
+
def abs(x: tl.tensor, builder: ir.builder) -> tl.tensor:
    """Elementwise absolute value of ``x``; a no-op for unsigned integers."""
    dtype = x.dtype
    if dtype.is_floating():
        return tl.tensor(builder.create_fabs(x.handle), x.type)
    if dtype.is_int_signed():
        return tl.tensor(builder.create_iabs(x.handle), x.type)
    if dtype.is_int_unsigned():
        # |x| == x for unsigned values
        return x
    assert False, f"Unexpected dtype {dtype}"
|
| 1445 |
+
|
| 1446 |
+
|
| 1447 |
+
##
|
| 1448 |
+
|
| 1449 |
+
|
| 1450 |
+
def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor:
    """Attach the `tt.divisibility` hint to ``x``'s handle and return ``x``."""
    # a 0-d tensor still expects exactly one value
    if len(values) != max(1, len(x.shape)):
        raise ValueError("Shape of input to multiple_of does not match the length of values")
    x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context()))
    return x
|
| 1455 |
+
|
| 1456 |
+
|
| 1457 |
+
def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor:
    """Attach the `tt.contiguity` hint to ``x``'s handle and return ``x``."""
    if len(values) != len(x.shape):
        raise ValueError("Shape of input to max_contiguous does not match the length of values")
    x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context()))
    return x
|
| 1462 |
+
|
| 1463 |
+
|
| 1464 |
+
def max_constancy(x: tl.tensor, values: List[int]) -> tl.tensor:
    """Attach the `tt.constancy` hint to ``x``'s handle and return ``x``."""
    if len(values) != len(x.shape):
        raise ValueError("Shape of input to max_constancy does not match the length of values")
    x.handle.set_attr("tt.constancy", ir.make_attr(values, x.handle.get_context()))
    return x
|
| 1469 |
+
|
| 1470 |
+
|
| 1471 |
+
def debug_barrier(builder: ir.builder) -> tl.tensor:
    """Insert a barrier instruction; the result is void-typed."""
    handle = builder.create_barrier()
    return tl.tensor(handle, tl.void)
|
| 1473 |
+
|
| 1474 |
+
|
| 1475 |
+
def device_print(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor:
    """Emit a device-side print of ``args`` preceded by ``prefix``.

    When there are arguments, the prefix is normalized to end in ": ";
    non-empty prefixes are also made to start with a space.
    """
    if args:
        if not prefix.endswith(" "):
            prefix += " "
        if not prefix.endswith(": "):
            prefix = prefix[:-1] + ": "
    if len(prefix) > 2 and not prefix.startswith(" "):
        prefix = " " + prefix

    arg_handles = [arg.handle for arg in args]
    return tl.tensor(builder.create_print(prefix, arg_handles), tl.void)
|
| 1489 |
+
|
| 1490 |
+
|
| 1491 |
+
def device_assert(cond: tl.tensor, msg: str, file_name: str, func_name, lineno: int, builder: ir.builder) -> tl.tensor:
    """Emit a device-side assertion on ``cond`` with source-location metadata."""
    if not cond.type.is_block():
        # promote a scalar condition to a 1-element block
        block_ty = tl.block_type(cond.type.scalar, (1, ))
        cond = tl.tensor(builder.create_splat(cond.handle, (1, )), block_ty)
    return tl.tensor(builder.create_assert(cond.handle, msg, file_name, func_name, lineno), tl.void)
|
| 1497 |
+
|
| 1498 |
+
|
| 1499 |
+
def _convert_elem_to_ir_value(builder, elem, require_i64):
    """Convert one shape/stride/offset element into an IR integer value.

    :param elem: a Python int, ``tl.constexpr``, or integer scalar ``tl.tensor``
    :param require_i64: emit an i64 value (used for shape/strides) instead of i32
    """
    if isinstance(elem, int):
        elem = tl.constexpr(elem)
    if isinstance(elem, tl.constexpr):
        return builder.get_int64(elem.value) if require_i64 else builder.get_int32(elem.value)
    elif isinstance(elem, tl.tensor):
        assert elem.numel.value == 1, "Expected a scalar in shape/strides/offsets"
        assert elem.dtype.is_int(), "Expected an integer scalar type in shape/strides/offsets"
        # BUGFIX: an i64 tensor with require_i64=True previously fell through
        # to the i32 branch and was silently truncated to 32 bits.
        if require_i64:
            if elem.dtype != tl.int64:
                return builder.create_int_cast(elem.handle, builder.get_int64_ty(), elem.dtype.is_int_signed())
        elif elem.dtype != tl.int32:
            return builder.create_int_cast(elem.handle, builder.get_int32_ty(), elem.dtype.is_int_signed())
        return elem.handle
    assert False, f"Unsupported element type in shape/strides/offsets: {type(elem)}"
|
| 1513 |
+
|
| 1514 |
+
|
| 1515 |
+
def _convert_to_ir_values(builder, list_like, require_i64=True):
    """Convert a scalar or iterable of scalars to a list of IR index values."""
    if not hasattr(list_like, "__iter__"):
        list_like = [list_like]
    return [_convert_elem_to_ir_value(builder, elem, require_i64) for elem in list_like]
|
| 1519 |
+
|
| 1520 |
+
|
| 1521 |
+
def make_block_ptr(base: tl.tensor, shape, strides, offsets, block_shape, order, builder: ir.builder) -> tl.tensor:
    """Create a block pointer (`tt.make_block_ptr`) describing a 2D-style tile view.

    ``base`` must be a plain (non-block) pointer; ``shape``/``strides`` may be
    dynamic i64 values, ``offsets`` dynamic i32 values, while ``block_shape``
    and ``order`` must be compile-time constants.
    """
    # Convert dynamic arguments to IR values
    # NOTES(Chenggang): current `shape/strides` are `int64_t`, while `offsets/block_shape` are `int32_t`
    shape = _convert_to_ir_values(builder, shape)
    strides = _convert_to_ir_values(builder, strides)
    offsets = _convert_to_ir_values(builder, offsets, require_i64=False)

    # Check `base` type
    if not base.type.is_ptr() or base.type.element_ty.is_block():
        raise ValueError("Expected `base` to be a pointer type (but not a block pointer type or others)")

    # Treat `pointer_type<tl.int1>` as `pointer_type<tl.int8>`
    if base.type.element_ty == tl.int1:
        base = cast(base, tl.pointer_type(tl.int8, base.type.address_space), builder)

    # Check whether `block_shape` is static (constexprs are unwrapped first)
    if not hasattr(block_shape, "__iter__"):
        block_shape = [block_shape]
    block_shape = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in block_shape]
    assert all([isinstance(elem, int) and -2**31 <= elem < 2**31 for elem in block_shape]), \
        "Expected a list of constant integers (`int32_t` range) in `block_shape`"

    # Check `order`: must be a permutation of axis indices
    if not hasattr(order, "__iter__"):
        order = [order]
    order = [elem.value if isinstance(elem, tl.constexpr) else elem for elem in order]
    assert sorted(order) == list(range(len(order))), "Expected a permutation of (0, 1, ..., len(order)-1) in order"

    # Must have same length
    assert all([len(block_shape) == len(list_like) for list_like in [shape, strides, offsets, order]]), \
        "Expected shape/strides/offsets/block_shape to have the same length"

    # Build value, the type is:
    #   `pointer_type<blocked<shape, element_type>>` in Python
    #   `tt.ptr<tensor<shape, element_type>>` in MLIR
    handle = builder.create_make_block_ptr(base.handle, shape, strides, offsets, block_shape, order)
    return tl.tensor(handle, tl.pointer_type(tl.block_type(base.type.element_ty, block_shape)))
|
| 1558 |
+
|
| 1559 |
+
|
| 1560 |
+
def advance(base: tl.tensor, offsets, builder: ir.builder) -> tl.tensor:
    """Advance a block pointer by ``offsets``; the pointer type is unchanged."""
    # dynamic offsets become i32 IR values
    offsets = _convert_to_ir_values(builder, offsets, require_i64=False)
    return tl.tensor(builder.create_advance(base.handle, offsets), base.type)
|
evalkit_cambrian/lib/python3.10/site-packages/triton/language/standard.py
ADDED
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from ..runtime.jit import jit
|
| 4 |
+
from . import core, math
|
| 5 |
+
|
| 6 |
+
# -----------------------
|
| 7 |
+
# Standard library
|
| 8 |
+
# -----------------------
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@jit
def cdiv(x, div):
    """
    Returns the ceiling of :code:`x / div` using integer arithmetic.

    :param x: the dividend
    :type x: Block
    :param div: the divisor
    :type div: Block
    """
    return (x + div - 1) // div
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@jit
@core._add_math_1arg_docstr("sigmoid")
def sigmoid(x):
    denom = 1 + core.exp(-x)
    return 1 / denom
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@jit
@core._add_math_1arg_docstr("softmax")
def softmax(x, ieee_rounding=False):
    # shift by the max along axis 0 for numerical stability
    shifted = x - max(x, 0)
    numerator = core.exp(shifted)
    denominator = sum(numerator, 0)
    return core.fdiv(numerator, denominator, ieee_rounding)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@jit
def ravel(x):
    """
    Returns :code:`x` reshaped to a single contiguous dimension.

    :param x: the input tensor
    :type x: Block
    """
    return core.view(x, [x.numel])
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@jit
def swizzle2d(i, j, size_i, size_j, size_g):
    """
    Transforms indices of a row-major size_i*size_j matrix into those
    of one where indices are row major for each group of size_j rows.
    For example, for size_i = size_j = 4 and size_g = 2, it will transform
    [[0 , 1 , 2 , 3 ],
     [4 , 5 , 6 , 7 ],
     [8 , 9 , 10, 11],
     [12, 13, 14, 15]]
    into
    [[0, 2,  4 , 6 ],
     [1, 3,  5 , 7 ],
     [8, 10, 12, 14],
     [9, 11, 13, 15]]
    """
    # position in the flattened row-major matrix
    flat = i * size_j + j
    # number of elements spanned by one group of `size_g` rows
    span = size_g * size_j
    # which group this element falls into
    gid = flat // span
    # first row of that group
    first_row = gid * size_g
    # the trailing group may contain fewer rows
    size_g = minimum(size_i - first_row, size_g)
    # remap to column-major order within the group
    new_i = first_row + (flat % size_g)
    new_j = (flat % span) // size_g
    return new_i, new_j
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@jit
def zeros(shape, dtype):
    """
    Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`.

    :param shape: Shape of the new array, e.g., (8, 16) or (8, )
    :type shape: tuple of ints
    :param dtype: Data-type of the new array, e.g., :code:`tl.float16`
    :type dtype: DType
    """
    return core.full(shape, 0, dtype)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Zero-filled tensor with the same shape and dtype as `input`.
@jit
def zeros_like(input):
    return zeros(input.shape, input.dtype)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@jit
def minimum(x, y):
    """
    Computes the element-wise minimum of :code:`x` and :code:`y`.

    :param input: the first input tensor
    :type input: Block
    :param other: the second input tensor
    :type other: Block
    """
    # Delegates to the math module's element-wise min primitive.
    return math.min(x, y)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@jit
def maximum(x, y):
    """
    Computes the element-wise maximum of :code:`x` and :code:`y`.

    :param input: the first input tensor
    :type input: Block
    :param other: the second input tensor
    :type other: Block
    """
    # Delegates to the math module's element-wise max primitive.
    return math.max(x, y)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
# max and argmax
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
# max and argmax


# Combine step for an indexed max-reduction: keep the larger value and its
# index. `tie_break_left` selects whether equal values resolve to the
# smaller index (presumably resolved at trace time since callers pass
# literal True/False — the False branch skips the tie comparison entirely).
@jit
def _argmax_combine(value1, index1, value2, index2, tie_break_left):
    if tie_break_left:
        tie = value1 == value2 and index1 < index2
    else:
        tie = False
    gt = value1 > value2 or tie
    v_ret = core.where(gt, value1, value2)
    i_ret = core.where(gt, index1, index2)
    return v_ret, i_ret
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
# Argmax combine that prefers the leftmost (smallest) index on ties.
@jit
def _argmax_combine_tie_break_left(value1, index1, value2, index2):
    return _argmax_combine(value1, index1, value2, index2, True)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# Argmax combine with no defined tie-breaking order (fewer comparisons).
@jit
def _argmax_combine_tie_break_fast(value1, index1, value2, index2):
    return _argmax_combine(value1, index1, value2, index2, False)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
# Max reduction along `axis`, optionally also returning argmax indices.
# The public docstring is attached by the decorator.
@jit
@core._add_reduction_docstr("maximum", return_indices_arg="return_indices",
                            tie_break_arg="return_indices_tie_break_left")
def max(input, axis=None, return_indices=False, return_indices_tie_break_left=True):
    input = core._promote_reduction_input(input)
    if return_indices:
        # Tie-break policy picks the combine function at compile time.
        if return_indices_tie_break_left:
            return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_left)
        else:
            return core._reduce_with_indices(input, axis, _argmax_combine_tie_break_fast)
    else:
        # Sub-32-bit inputs are widened to a 32-bit type before reducing
        # (float -> float32, integer -> int32).
        if core.constexpr(input.dtype.primitive_bitwidth) < core.constexpr(32):
            if core.constexpr(input.dtype.is_floating()):
                input = input.to(core.float32)
            else:
                assert input.dtype.is_integer_type()
                input = input.to(core.int32)
        return core.reduce(input, axis, maximum)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# Indices of the maximum along `axis`; discards the values that `max`
# returns alongside them.
@jit
@core._add_reduction_docstr("maximum index", tie_break_arg="tie_break_left")
def argmax(input, axis, tie_break_left=True):
    (_, ret) = max(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left)
    return ret
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# min and argmin
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
# min and argmin


# Combine step for an indexed min-reduction: keep the smaller value and its
# index; mirrors _argmax_combine above.
@jit
def _argmin_combine(value1, index1, value2, index2, tie_break_left):
    if tie_break_left:
        tie = value1 == value2 and index1 < index2
    else:
        tie = False
    lt = value1 < value2 or tie
    value_ret = core.where(lt, value1, value2)
    index_ret = core.where(lt, index1, index2)
    return value_ret, index_ret
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
# Argmin combine that prefers the leftmost (smallest) index on ties.
@jit
def _argmin_combine_tie_break_left(value1, index1, value2, index2):
    return _argmin_combine(value1, index1, value2, index2, True)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# Argmin combine with no defined tie-breaking order (fewer comparisons).
@jit
def _argmin_combine_tie_break_fast(value1, index1, value2, index2):
    return _argmin_combine(value1, index1, value2, index2, False)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# Min reduction along `axis`, optionally also returning argmin indices.
# The public docstring is attached by the decorator.
@jit
@core._add_reduction_docstr("minimum", return_indices_arg="return_indices",
                            tie_break_arg="return_indices_tie_break_left")
def min(input, axis=None, return_indices=False, return_indices_tie_break_left=True):
    input = core._promote_reduction_input(input)
    if return_indices:
        # Tie-break policy picks the combine function at compile time.
        if return_indices_tie_break_left:
            return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_left)
        else:
            return core._reduce_with_indices(input, axis, _argmin_combine_tie_break_fast)
    else:
        # Sub-32-bit inputs are widened to a 32-bit type before reducing
        # (float -> float32, integer -> int32). The bitwidth comparison now
        # wraps 32 in core.constexpr, matching the identical check in `max`.
        if core.constexpr(input.dtype.primitive_bitwidth) < core.constexpr(32):
            if core.constexpr(input.dtype.is_floating()):
                input = input.to(core.float32)
            else:
                assert input.dtype.is_integer_type()
                input = input.to(core.int32)
        return core.reduce(input, axis, minimum)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
# Indices of the minimum along `axis`; discards the values that `min`
# returns alongside them.
@jit
@core._add_reduction_docstr("minimum index", tie_break_arg="tie_break_left")
def argmin(input, axis, tie_break_left=True):
    _, ret = min(input, axis, return_indices=True, return_indices_tie_break_left=tie_break_left)
    return ret
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# Associative combine for sum reductions/scans.
@jit
def _sum_combine(a, b):
    return a + b
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
# sum
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# sum


# Sum reduction along `axis`; docstring attached by the decorator.
@jit
@core._add_reduction_docstr("sum")
def sum(input, axis=None):
    input = core._promote_reduction_input(input)
    return core.reduce(input, axis, _sum_combine)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
# Associative combine for xor-sum reductions.
@jit
def _xor_combine(a, b):
    return a ^ b
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# xor sum
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
# xor sum


# XOR reduction along `axis`. Implemented as a builtin (not @jit), so the
# compiler's _builder/_generator handles are threaded through explicitly
# and the integer-only check runs at compile time.
@core.builtin
@core._add_reduction_docstr("xor sum")
def xor_sum(input, axis=None, _builder=None, _generator=None):
    scalar_ty = input.type.scalar
    if not scalar_ty.is_int():
        raise ValueError("xor_sum only supported for integers")

    input = core._promote_reduction_input(input, _builder=_builder)
    return core.reduce(input, axis, _xor_combine, _builder=_builder, _generator=_generator)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# cumsum
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# cumsum


# Inclusive prefix sum along `axis`; docstring attached by the decorator.
@jit
@core._add_scan_docstr("cumsum")
def cumsum(input, axis=0):
    # todo rename this to a generic function name
    input = core._promote_reduction_input(input)
    return core.associative_scan(input, axis, _sum_combine)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
# cumprod
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
# cumprod


# Associative combine for product scans.
@jit
def _prod_combine(a, b):
    return a * b
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
# Inclusive prefix product along `axis`; docstring attached by the decorator.
@jit
@core._add_scan_docstr("cumprod")
def cumprod(input, axis=0):
    # todo rename this to a generic function name
    input = core._promote_reduction_input(input)
    return core.associative_scan(input, axis, _prod_combine)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# sort
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
# sort


# Builds a length-2 0/1 mask that selects position `pos` of dimension `idx`
# (counting from the last dimension), expanded so it broadcasts against an
# n_dims-dimensional tensor whose axes all have size 2.
@jit
def _indicator(n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr):
    core.static_assert(idx < n_dims)
    core.static_assert((pos == 0) or (pos == 1))
    y = core.arange(0, 2)
    # arange gives [0, 1], which selects pos == 1; flip it for pos == 0.
    if pos == 0:
        y = 1 - y

    # Insert size-1 dims everywhere except the target axis for broadcasting.
    for n in core.static_range(0, n_dims):
        if n != n_dims - 1 - idx:
            y = core.expand_dims(y, n)
    return y
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
# Selects half `pos` of dimension `idx` by masking with the 0/1 indicator
# and summing that dimension out; with keep_dim the reduced axis is
# re-inserted (size 1) so the result broadcasts against the input.
@jit
def _take_slice(x, n_dims: core.constexpr, idx: core.constexpr, pos: core.constexpr, keep_dim: core.constexpr = True):
    y = sum(x * _indicator(n_dims, idx, pos), n_dims - 1 - idx)
    if keep_dim:
        y = core.expand_dims(y, n_dims - 1 - idx)

    return y
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
# One bitonic compare-and-swap step along dimension `idx`.
@jit
def _compare_and_swap(x, desc_mask, n_dims: core.constexpr, idx: core.constexpr):
    # l and r are the two halves of x along dimension idx, kept broadcastable.
    l = _take_slice(x, n_dims, idx, 0)
    r = _take_slice(x, n_dims, idx, 1)

    # The XOR-based conditional swap below only works on integer bit
    # patterns, so floating-point inputs are bitcast to a same-width integer
    # type first (the comparison l > r still uses the original values).
    x_int = x
    l_int = l
    r_int = r
    if x.dtype.is_floating():
        if core.constexpr(x.dtype.primitive_bitwidth) == 16:
            dtype_int = core.int16
        elif core.constexpr(x.dtype.primitive_bitwidth) == 32:
            dtype_int = core.int32
        elif core.constexpr(x.dtype.primitive_bitwidth) == 64:
            dtype_int = core.int64
        else:
            raise ValueError("Unsupported dtype")
        x_int = x.to(dtype_int, bitcast=True)
        l_int = l.to(dtype_int, bitcast=True)
        r_int = r.to(dtype_int, bitcast=True)
    desc_mask = desc_mask.to(x_int.dtype)
    zero = zeros_like(x_int)
    # Where (l > r) XOR desc_mask marks a pair as out of order, XORing both
    # elements with (l ^ r) swaps them in place; XOR with 0 is a no-op.
    y = x_int ^ core.where((l > r) ^ desc_mask, l_int ^ r_int, zero)
    y = y.to(x.dtype, bitcast=True)
    return y
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
# Merges bitonic runs spanning the lowest `active_dims` dimensions of x.
@jit
def _bitonic_merge(x, n_dims: core.constexpr, active_dims: core.constexpr, order_type: core.constexpr):
    '''
    order_type 0 == ascending
    order_type 1 == descending
    order_type 2 == alternating
    '''
    core.static_assert(active_dims <= n_dims)

    # For alternating order, an indicator one dimension above the active
    # ones flips the sort direction of every other run; otherwise the
    # order_type constant (0 or 1) is used directly as the descending mask.
    if order_type == 2:
        desc_mask = _indicator(n_dims, active_dims, 1)
    else:
        desc_mask = order_type

    # Compare-and-swap over each active dimension, widest stride first.
    for i in core.static_range(active_dims):
        x = _compare_and_swap(x, desc_mask, n_dims, active_dims - 1 - i)

    return x
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
def _log2(i: core.constexpr):
    """Compile-time floor(log2) of the integer wrapped in *i*."""
    exponent = 0
    remaining = i.value
    # Halve until a single leading bit is left, counting the shifts.
    while remaining > 1:
        remaining >>= 1
        exponent += 1
    return core.constexpr(exponent)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _is_power_of_two(i: core.constexpr):
    """Compile-time check that the integer wrapped in *i* is a power of two."""
    value = i.value
    # A power of two has exactly one set bit: clearing the lowest set bit
    # (value & (value - 1)) leaves zero; zero itself is excluded.
    is_pow2 = value != 0 and (value & (value - 1)) == 0
    return core.constexpr(is_pow2)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _unwrap_if_constexpr(o):
    """Return the raw value inside a ``core.constexpr``; pass other objects through."""
    if isinstance(o, core.constexpr):
        return o.value
    return o
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _get_sort_dim(dim, shape):
    """Resolve the sort dimension (defaulting to the last) and validate it."""
    dim = _unwrap_if_constexpr(dim)
    shape = _unwrap_if_constexpr(shape)
    last_dim = len(shape) - 1
    if dim is None:
        dim = last_dim
    assert dim == last_dim, "Currently only support sorting on the last dimension"
    return core.constexpr(dim)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
# Bitonic sort along `dim` (currently only the last dimension); both the
# sorted dimension and the total element count must be powers of two.
@jit
def sort(x, dim=None, descending: core.constexpr = 0):
    core.static_assert(_is_power_of_two(x.shape[_get_sort_dim(dim, x.shape)]))
    core.static_assert(_is_power_of_two(x.numel))
    # reshape the tensor to have all dimensions be 2.
    # TODO: We shouldn't have to change the dimensions not sorted.
    y = core.reshape(x, [2] * _log2(x.numel))
    # Merge bitonic runs of length 2, 4, ... up to the full sorted dimension.
    # Intermediate stages use alternating order (2) so the next stage sees
    # bitonic sequences; the final stage applies the caller-requested order.
    for i in core.static_range(1, _log2(x.shape[_get_sort_dim(dim, x.shape)]) + 1):
        y = _bitonic_merge(y, _log2(x.numel), i, (descending if
                                                  (i == _log2(x.shape[_get_sort_dim(dim, x.shape)])) else 2))

    x = core.reshape(y, x.shape)
    return x
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__init__.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .autotuner import (Autotuner, Config, Heuristics, OutOfResources, autotune, heuristics)
|
| 2 |
+
from .driver import driver
|
| 3 |
+
from .jit import JITFunction, KernelInterface, MockTensor, TensorWrapper, reinterpret
|
| 4 |
+
|
| 5 |
+
# Public API of the runtime package, re-exported from the submodules above.
__all__ = [
    "driver",
    "Config",
    "Heuristics",
    "autotune",
    "heuristics",
    "JITFunction",
    "KernelInterface",
    "reinterpret",
    "TensorWrapper",
    "OutOfResources",
    "MockTensor",
    "Autotuner",
]
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (564 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/cache.cpython-310.pyc
ADDED
|
Binary file (5.08 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/driver.cpython-310.pyc
ADDED
|
Binary file (6.19 kB). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/errors.cpython-310.pyc
ADDED
|
Binary file (905 Bytes). View file
|
|
|
evalkit_cambrian/lib/python3.10/site-packages/triton/runtime/__pycache__/interpreter.cpython-310.pyc
ADDED
|
Binary file (25.7 kB). View file
|
|
|