ZTWHHH commited on
Commit
3662621
·
verified ·
1 Parent(s): c835f15

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/certifi/__init__.py +4 -0
  2. parrot/lib/python3.10/site-packages/certifi/__pycache__/__init__.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/certifi/core.py +114 -0
  4. parrot/lib/python3.10/site-packages/certifi/py.typed +0 -0
  5. parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt +21 -0
  19. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt +294 -0
  20. parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt +42 -0
  21. parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL +6 -0
  22. parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/RECORD +19 -0
  23. parrot/lib/python3.10/site-packages/typer-0.12.5.dist-info/RECORD +39 -0
  24. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h +11 -0
  25. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h +15 -0
  26. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h +34 -0
  27. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h +11 -0
  28. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h +29 -0
  29. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h +36 -0
  30. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h +24 -0
  31. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h +22 -0
  32. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h +11 -0
  33. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h +11 -0
  34. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h +12 -0
  35. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h +36 -0
  36. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h +17 -0
  37. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h +17 -0
  38. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h +28 -0
  39. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h +63 -0
  40. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h +46 -0
  41. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h +1272 -0
  42. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h +15 -0
  43. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h +20 -0
  44. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h +21 -0
  45. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h +57 -0
  46. videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h +81 -0
  47. vllm/lib/python3.10/site-packages/cupy/_manipulation/__init__.py +2 -0
  48. vllm/lib/python3.10/site-packages/cupy/_manipulation/join.py +152 -0
  49. vllm/lib/python3.10/site-packages/cupy/_manipulation/kind.py +122 -0
  50. vllm/lib/python3.10/site-packages/cupy/_manipulation/split.py +91 -0
parrot/lib/python3.10/site-packages/certifi/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .core import contents, where
2
+
3
+ __all__ = ["contents", "where"]
4
+ __version__ = "2024.08.30"
parrot/lib/python3.10/site-packages/certifi/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (272 Bytes). View file
 
parrot/lib/python3.10/site-packages/certifi/core.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ certifi.py
3
+ ~~~~~~~~~~
4
+
5
+ This module returns the installation location of cacert.pem or its contents.
6
+ """
7
+ import sys
8
+ import atexit
9
+
10
+ def exit_cacert_ctx() -> None:
11
+ _CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr]
12
+
13
+
14
+ if sys.version_info >= (3, 11):
15
+
16
+ from importlib.resources import as_file, files
17
+
18
+ _CACERT_CTX = None
19
+ _CACERT_PATH = None
20
+
21
+ def where() -> str:
22
+ # This is slightly terrible, but we want to delay extracting the file
23
+ # in cases where we're inside of a zipimport situation until someone
24
+ # actually calls where(), but we don't want to re-extract the file
25
+ # on every call of where(), so we'll do it once then store it in a
26
+ # global variable.
27
+ global _CACERT_CTX
28
+ global _CACERT_PATH
29
+ if _CACERT_PATH is None:
30
+ # This is slightly janky, the importlib.resources API wants you to
31
+ # manage the cleanup of this file, so it doesn't actually return a
32
+ # path, it returns a context manager that will give you the path
33
+ # when you enter it and will do any cleanup when you leave it. In
34
+ # the common case of not needing a temporary file, it will just
35
+ # return the file system location and the __exit__() is a no-op.
36
+ #
37
+ # We also have to hold onto the actual context manager, because
38
+ # it will do the cleanup whenever it gets garbage collected, so
39
+ # we will also store that at the global level as well.
40
+ _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
41
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
42
+ atexit.register(exit_cacert_ctx)
43
+
44
+ return _CACERT_PATH
45
+
46
+ def contents() -> str:
47
+ return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
48
+
49
+ elif sys.version_info >= (3, 7):
50
+
51
+ from importlib.resources import path as get_path, read_text
52
+
53
+ _CACERT_CTX = None
54
+ _CACERT_PATH = None
55
+
56
+ def where() -> str:
57
+ # This is slightly terrible, but we want to delay extracting the
58
+ # file in cases where we're inside of a zipimport situation until
59
+ # someone actually calls where(), but we don't want to re-extract
60
+ # the file on every call of where(), so we'll do it once then store
61
+ # it in a global variable.
62
+ global _CACERT_CTX
63
+ global _CACERT_PATH
64
+ if _CACERT_PATH is None:
65
+ # This is slightly janky, the importlib.resources API wants you
66
+ # to manage the cleanup of this file, so it doesn't actually
67
+ # return a path, it returns a context manager that will give
68
+ # you the path when you enter it and will do any cleanup when
69
+ # you leave it. In the common case of not needing a temporary
70
+ # file, it will just return the file system location and the
71
+ # __exit__() is a no-op.
72
+ #
73
+ # We also have to hold onto the actual context manager, because
74
+ # it will do the cleanup whenever it gets garbage collected, so
75
+ # we will also store that at the global level as well.
76
+ _CACERT_CTX = get_path("certifi", "cacert.pem")
77
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
78
+ atexit.register(exit_cacert_ctx)
79
+
80
+ return _CACERT_PATH
81
+
82
+ def contents() -> str:
83
+ return read_text("certifi", "cacert.pem", encoding="ascii")
84
+
85
+ else:
86
+ import os
87
+ import types
88
+ from typing import Union
89
+
90
+ Package = Union[types.ModuleType, str]
91
+ Resource = Union[str, "os.PathLike"]
92
+
93
+ # This fallback will work for Python versions prior to 3.7 that lack the
94
+ # importlib.resources module but relies on the existing `where` function
95
+ # so won't address issues with environments like PyOxidizer that don't set
96
+ # __file__ on modules.
97
+ def read_text(
98
+ package: Package,
99
+ resource: Resource,
100
+ encoding: str = 'utf-8',
101
+ errors: str = 'strict'
102
+ ) -> str:
103
+ with open(where(), encoding=encoding) as data:
104
+ return data.read()
105
+
106
+ # If we don't have importlib.resources, then we will just do the old logic
107
+ # of assuming we're on the filesystem and munge the path directly.
108
+ def where() -> str:
109
+ f = os.path.dirname(__file__)
110
+
111
+ return os.path.join(f, "cacert.pem")
112
+
113
+ def contents() -> str:
114
+ return read_text("certifi", "cacert.pem", encoding="ascii")
parrot/lib/python3.10/site-packages/certifi/py.typed ADDED
File without changes
parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_filters.cpython-310.pyc ADDED
Binary file (52.4 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc ADDED
Binary file (47.6 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc ADDED
Binary file (8.31 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc ADDED
Binary file (708 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (550 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_c_api.cpython-310.pyc ADDED
Binary file (4.13 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_datatypes.cpython-310.pyc ADDED
Binary file (1.88 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_filters.cpython-310.pyc ADDED
Binary file (64.5 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_fourier.cpython-310.pyc ADDED
Binary file (5.16 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_interpolation.cpython-310.pyc ADDED
Binary file (46.8 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_measurements.cpython-310.pyc ADDED
Binary file (36.6 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_morphology.cpython-310.pyc ADDED
Binary file (59.9 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/__pycache__/test_splines.cpython-310.pyc ADDED
Binary file (1.99 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_inputs.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1 1 1 1 1 1 1
2
+ 1 1 1 1 1 1 1
3
+ 1 1 1 1 1 1 1
4
+ 1 1 1 1 1 1 1
5
+ 1 1 1 1 1 1 1
6
+ 1 1 1 1 1 1 1
7
+ 1 1 1 1 1 1 1
8
+ 1 1 1 0 1 1 1
9
+ 1 1 0 0 0 1 1
10
+ 1 0 1 0 1 0 1
11
+ 0 0 0 1 0 0 0
12
+ 1 0 1 0 1 0 1
13
+ 1 1 0 0 0 1 1
14
+ 1 1 1 0 1 1 1
15
+ 1 0 1 1 1 0 1
16
+ 0 0 0 1 0 0 0
17
+ 1 0 0 1 0 0 1
18
+ 1 1 1 1 1 1 1
19
+ 1 0 0 1 0 0 1
20
+ 0 0 0 1 0 0 0
21
+ 1 0 1 1 1 0 1
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_results.txt ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1 1 1 1 1 1 1
2
+ 1 1 1 1 1 1 1
3
+ 1 1 1 1 1 1 1
4
+ 1 1 1 1 1 1 1
5
+ 1 1 1 1 1 1 1
6
+ 1 1 1 1 1 1 1
7
+ 1 1 1 1 1 1 1
8
+ 1 1 1 1 1 1 1
9
+ 1 1 1 1 1 1 1
10
+ 1 1 1 1 1 1 1
11
+ 1 1 1 1 1 1 1
12
+ 1 1 1 1 1 1 1
13
+ 1 1 1 1 1 1 1
14
+ 1 1 1 1 1 1 1
15
+ 1 1 1 1 1 1 1
16
+ 2 2 2 2 2 2 2
17
+ 3 3 3 3 3 3 3
18
+ 4 4 4 4 4 4 4
19
+ 5 5 5 5 5 5 5
20
+ 6 6 6 6 6 6 6
21
+ 7 7 7 7 7 7 7
22
+ 1 1 1 1 1 1 1
23
+ 1 1 1 1 1 1 1
24
+ 1 1 1 1 1 1 1
25
+ 1 1 1 1 1 1 1
26
+ 1 1 1 1 1 1 1
27
+ 1 1 1 1 1 1 1
28
+ 1 1 1 1 1 1 1
29
+ 1 2 3 4 5 6 7
30
+ 8 9 10 11 12 13 14
31
+ 15 16 17 18 19 20 21
32
+ 22 23 24 25 26 27 28
33
+ 29 30 31 32 33 34 35
34
+ 36 37 38 39 40 41 42
35
+ 43 44 45 46 47 48 49
36
+ 1 1 1 1 1 1 1
37
+ 1 1 1 1 1 1 1
38
+ 1 1 1 1 1 1 1
39
+ 1 1 1 1 1 1 1
40
+ 1 1 1 1 1 1 1
41
+ 1 1 1 1 1 1 1
42
+ 1 1 1 1 1 1 1
43
+ 1 1 1 1 1 1 1
44
+ 1 1 1 1 1 1 1
45
+ 1 1 1 1 1 1 1
46
+ 1 1 1 1 1 1 1
47
+ 1 1 1 1 1 1 1
48
+ 1 1 1 1 1 1 1
49
+ 1 1 1 1 1 1 1
50
+ 1 2 3 4 5 6 7
51
+ 8 1 2 3 4 5 6
52
+ 9 8 1 2 3 4 5
53
+ 10 9 8 1 2 3 4
54
+ 11 10 9 8 1 2 3
55
+ 12 11 10 9 8 1 2
56
+ 13 12 11 10 9 8 1
57
+ 1 2 3 4 5 6 7
58
+ 1 2 3 4 5 6 7
59
+ 1 2 3 4 5 6 7
60
+ 1 2 3 4 5 6 7
61
+ 1 2 3 4 5 6 7
62
+ 1 2 3 4 5 6 7
63
+ 1 2 3 4 5 6 7
64
+ 1 1 1 1 1 1 1
65
+ 1 1 1 1 1 1 1
66
+ 1 1 1 1 1 1 1
67
+ 1 1 1 1 1 1 1
68
+ 1 1 1 1 1 1 1
69
+ 1 1 1 1 1 1 1
70
+ 1 1 1 1 1 1 1
71
+ 1 1 1 1 1 1 1
72
+ 1 1 1 1 1 1 1
73
+ 1 1 1 1 1 1 1
74
+ 1 1 1 1 1 1 1
75
+ 1 1 1 1 1 1 1
76
+ 1 1 1 1 1 1 1
77
+ 1 1 1 1 1 1 1
78
+ 1 2 1 2 1 2 1
79
+ 2 1 2 1 2 1 2
80
+ 1 2 1 2 1 2 1
81
+ 2 1 2 1 2 1 2
82
+ 1 2 1 2 1 2 1
83
+ 2 1 2 1 2 1 2
84
+ 1 2 1 2 1 2 1
85
+ 1 2 3 4 5 6 7
86
+ 2 3 4 5 6 7 8
87
+ 3 4 5 6 7 8 9
88
+ 4 5 6 7 8 9 10
89
+ 5 6 7 8 9 10 11
90
+ 6 7 8 9 10 11 12
91
+ 7 8 9 10 11 12 13
92
+ 1 1 1 1 1 1 1
93
+ 1 1 1 1 1 1 1
94
+ 1 1 1 1 1 1 1
95
+ 1 1 1 1 1 1 1
96
+ 1 1 1 1 1 1 1
97
+ 1 1 1 1 1 1 1
98
+ 1 1 1 1 1 1 1
99
+ 1 1 1 0 2 2 2
100
+ 1 1 0 0 0 2 2
101
+ 1 0 3 0 2 0 4
102
+ 0 0 0 2 0 0 0
103
+ 5 0 2 0 6 0 7
104
+ 2 2 0 0 0 7 7
105
+ 2 2 2 0 7 7 7
106
+ 1 1 1 0 2 2 2
107
+ 1 1 0 0 0 2 2
108
+ 3 0 1 0 4 0 2
109
+ 0 0 0 1 0 0 0
110
+ 5 0 6 0 1 0 7
111
+ 5 5 0 0 0 1 1
112
+ 5 5 5 0 1 1 1
113
+ 1 1 1 0 2 2 2
114
+ 3 3 0 0 0 4 4
115
+ 5 0 6 0 7 0 8
116
+ 0 0 0 9 0 0 0
117
+ 10 0 11 0 12 0 13
118
+ 14 14 0 0 0 15 15
119
+ 16 16 16 0 17 17 17
120
+ 1 1 1 0 2 3 3
121
+ 1 1 0 0 0 3 3
122
+ 1 0 4 0 3 0 3
123
+ 0 0 0 3 0 0 0
124
+ 3 0 3 0 5 0 6
125
+ 3 3 0 0 0 6 6
126
+ 3 3 7 0 6 6 6
127
+ 1 2 3 0 4 5 6
128
+ 7 8 0 0 0 9 10
129
+ 11 0 12 0 13 0 14
130
+ 0 0 0 15 0 0 0
131
+ 16 0 17 0 18 0 19
132
+ 20 21 0 0 0 22 23
133
+ 24 25 26 0 27 28 29
134
+ 1 1 1 0 2 2 2
135
+ 1 1 0 0 0 2 2
136
+ 1 0 3 0 2 0 2
137
+ 0 0 0 2 0 0 0
138
+ 2 0 2 0 4 0 5
139
+ 2 2 0 0 0 5 5
140
+ 2 2 2 0 5 5 5
141
+ 1 1 1 0 2 2 2
142
+ 1 1 0 0 0 2 2
143
+ 1 0 3 0 4 0 2
144
+ 0 0 0 5 0 0 0
145
+ 6 0 7 0 8 0 9
146
+ 6 6 0 0 0 9 9
147
+ 6 6 6 0 9 9 9
148
+ 1 2 3 0 4 5 6
149
+ 7 1 0 0 0 4 5
150
+ 8 0 1 0 9 0 4
151
+ 0 0 0 1 0 0 0
152
+ 10 0 11 0 1 0 12
153
+ 13 10 0 0 0 1 14
154
+ 15 13 10 0 16 17 1
155
+ 1 2 3 0 4 5 6
156
+ 1 2 0 0 0 5 6
157
+ 1 0 7 0 8 0 6
158
+ 0 0 0 9 0 0 0
159
+ 10 0 11 0 12 0 13
160
+ 10 14 0 0 0 15 13
161
+ 10 14 16 0 17 15 13
162
+ 1 1 1 0 1 1 1
163
+ 1 1 0 0 0 1 1
164
+ 1 0 1 0 1 0 1
165
+ 0 0 0 1 0 0 0
166
+ 1 0 1 0 1 0 1
167
+ 1 1 0 0 0 1 1
168
+ 1 1 1 0 1 1 1
169
+ 1 1 2 0 3 3 3
170
+ 1 1 0 0 0 3 3
171
+ 1 0 1 0 4 0 3
172
+ 0 0 0 1 0 0 0
173
+ 5 0 6 0 1 0 1
174
+ 5 5 0 0 0 1 1
175
+ 5 5 5 0 7 1 1
176
+ 1 2 1 0 1 3 1
177
+ 2 1 0 0 0 1 3
178
+ 1 0 1 0 1 0 1
179
+ 0 0 0 1 0 0 0
180
+ 1 0 1 0 1 0 1
181
+ 4 1 0 0 0 1 5
182
+ 1 4 1 0 1 5 1
183
+ 1 2 3 0 4 5 6
184
+ 2 3 0 0 0 6 7
185
+ 3 0 8 0 6 0 9
186
+ 0 0 0 6 0 0 0
187
+ 10 0 6 0 11 0 12
188
+ 13 6 0 0 0 12 14
189
+ 6 15 16 0 12 14 17
190
+ 1 1 1 0 2 2 2
191
+ 1 1 0 0 0 2 2
192
+ 1 0 1 0 3 0 2
193
+ 0 0 0 1 0 0 0
194
+ 4 0 5 0 1 0 1
195
+ 4 4 0 0 0 1 1
196
+ 4 4 4 0 1 1 1
197
+ 1 0 2 2 2 0 3
198
+ 0 0 0 2 0 0 0
199
+ 4 0 0 5 0 0 5
200
+ 5 5 5 5 5 5 5
201
+ 5 0 0 5 0 0 6
202
+ 0 0 0 7 0 0 0
203
+ 8 0 7 7 7 0 9
204
+ 1 0 2 2 2 0 3
205
+ 0 0 0 2 0 0 0
206
+ 4 0 0 4 0 0 5
207
+ 4 4 4 4 4 4 4
208
+ 6 0 0 4 0 0 4
209
+ 0 0 0 7 0 0 0
210
+ 8 0 7 7 7 0 9
211
+ 1 0 2 2 2 0 3
212
+ 0 0 0 4 0 0 0
213
+ 5 0 0 6 0 0 7
214
+ 8 8 8 8 8 8 8
215
+ 9 0 0 10 0 0 11
216
+ 0 0 0 12 0 0 0
217
+ 13 0 14 14 14 0 15
218
+ 1 0 2 3 3 0 4
219
+ 0 0 0 3 0 0 0
220
+ 5 0 0 3 0 0 6
221
+ 5 5 3 3 3 6 6
222
+ 5 0 0 3 0 0 6
223
+ 0 0 0 3 0 0 0
224
+ 7 0 3 3 8 0 9
225
+ 1 0 2 3 4 0 5
226
+ 0 0 0 6 0 0 0
227
+ 7 0 0 8 0 0 9
228
+ 10 11 12 13 14 15 16
229
+ 17 0 0 18 0 0 19
230
+ 0 0 0 20 0 0 0
231
+ 21 0 22 23 24 0 25
232
+ 1 0 2 2 2 0 3
233
+ 0 0 0 2 0 0 0
234
+ 2 0 0 2 0 0 2
235
+ 2 2 2 2 2 2 2
236
+ 2 0 0 2 0 0 2
237
+ 0 0 0 2 0 0 0
238
+ 4 0 2 2 2 0 5
239
+ 1 0 2 2 2 0 3
240
+ 0 0 0 2 0 0 0
241
+ 2 0 0 2 0 0 2
242
+ 2 2 2 2 2 2 2
243
+ 2 0 0 2 0 0 2
244
+ 0 0 0 2 0 0 0
245
+ 4 0 2 2 2 0 5
246
+ 1 0 2 3 4 0 5
247
+ 0 0 0 2 0 0 0
248
+ 6 0 0 7 0 0 8
249
+ 9 6 10 11 7 12 13
250
+ 14 0 0 10 0 0 12
251
+ 0 0 0 15 0 0 0
252
+ 16 0 17 18 15 0 19
253
+ 1 0 2 3 4 0 5
254
+ 0 0 0 3 0 0 0
255
+ 6 0 0 3 0 0 7
256
+ 6 8 9 3 10 11 7
257
+ 6 0 0 3 0 0 7
258
+ 0 0 0 3 0 0 0
259
+ 12 0 13 3 14 0 15
260
+ 1 0 2 2 2 0 3
261
+ 0 0 0 2 0 0 0
262
+ 2 0 0 2 0 0 2
263
+ 2 2 2 2 2 2 2
264
+ 2 0 0 2 0 0 2
265
+ 0 0 0 2 0 0 0
266
+ 4 0 2 2 2 0 5
267
+ 1 0 2 2 3 0 4
268
+ 0 0 0 2 0 0 0
269
+ 5 0 0 2 0 0 6
270
+ 5 5 2 2 2 6 6
271
+ 5 0 0 2 0 0 6
272
+ 0 0 0 2 0 0 0
273
+ 7 0 8 2 2 0 9
274
+ 1 0 2 3 2 0 4
275
+ 0 0 0 2 0 0 0
276
+ 5 0 0 6 0 0 7
277
+ 8 5 6 9 6 7 10
278
+ 5 0 0 6 0 0 7
279
+ 0 0 0 11 0 0 0
280
+ 12 0 11 13 11 0 14
281
+ 1 0 2 3 4 0 5
282
+ 0 0 0 4 0 0 0
283
+ 6 0 0 7 0 0 8
284
+ 9 10 7 11 12 8 13
285
+ 10 0 0 12 0 0 14
286
+ 0 0 0 15 0 0 0
287
+ 16 0 15 17 18 0 19
288
+ 1 0 2 2 2 0 3
289
+ 0 0 0 2 0 0 0
290
+ 2 0 0 2 0 0 2
291
+ 2 2 2 2 2 2 2
292
+ 2 0 0 2 0 0 2
293
+ 0 0 0 2 0 0 0
294
+ 4 0 2 2 2 0 5
parrot/lib/python3.10/site-packages/scipy/ndimage/tests/data/label_strels.txt ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 0 0 1
2
+ 1 1 1
3
+ 1 0 0
4
+ 1 0 0
5
+ 1 1 1
6
+ 0 0 1
7
+ 0 0 0
8
+ 1 1 1
9
+ 0 0 0
10
+ 0 1 1
11
+ 0 1 0
12
+ 1 1 0
13
+ 0 0 0
14
+ 0 0 0
15
+ 0 0 0
16
+ 0 1 1
17
+ 1 1 1
18
+ 1 1 0
19
+ 0 1 0
20
+ 1 1 1
21
+ 0 1 0
22
+ 1 0 0
23
+ 0 1 0
24
+ 0 0 1
25
+ 0 1 0
26
+ 0 1 0
27
+ 0 1 0
28
+ 1 1 1
29
+ 1 1 1
30
+ 1 1 1
31
+ 1 1 0
32
+ 0 1 0
33
+ 0 1 1
34
+ 1 0 1
35
+ 0 1 0
36
+ 1 0 1
37
+ 0 0 1
38
+ 0 1 0
39
+ 1 0 0
40
+ 1 1 0
41
+ 1 1 1
42
+ 0 1 1
parrot/lib/python3.10/site-packages/six-1.16.0.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.36.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
parrot/lib/python3.10/site-packages/sty-1.0.6.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ sty-1.0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ sty-1.0.6.dist-info/LICENSE,sha256=BKoNNYR-GsFjo_h-G-iR7aGvB-mrYRILDiZtQ8lRpyQ,11360
3
+ sty-1.0.6.dist-info/METADATA,sha256=LMYR-yofgQIwfqSC55FEbrEipUMlgbGkZltn4P0Nvqg,5709
4
+ sty-1.0.6.dist-info/RECORD,,
5
+ sty-1.0.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ sty-1.0.6.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
7
+ sty/__init__.py,sha256=0H8MFmXLlV3h-9d-qooRFTUlyicnHGTreSDPuhzAtYk,1280
8
+ sty/__pycache__/__init__.cpython-310.pyc,,
9
+ sty/__pycache__/lib.cpython-310.pyc,,
10
+ sty/__pycache__/primitive.cpython-310.pyc,,
11
+ sty/__pycache__/register.cpython-310.pyc,,
12
+ sty/__pycache__/renderfunc.cpython-310.pyc,,
13
+ sty/__pycache__/rendertype.cpython-310.pyc,,
14
+ sty/lib.py,sha256=j9VDuAV4IvXe1QNXw75TZIcFQeh4wrGhIF7jJQAP2QU,1010
15
+ sty/primitive.py,sha256=5XXh8U6Az56TrxXSeXASbkMhCtTxpSI4ix4Y38QWb9g,6287
16
+ sty/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
+ sty/register.py,sha256=PBZsg5A32Hzfqc1wODaFxOZalP78iEPUYQMu5tk4FXk,5818
18
+ sty/renderfunc.py,sha256=_4-x0m40PeADw6QuQzBBogSuFibbPH0uRORgAxTccMY,938
19
+ sty/rendertype.py,sha256=fZRsIErx2f_5d9_MJMAblCUnduUdeDmENt6Pk-prXWQ,1579
parrot/lib/python3.10/site-packages/typer-0.12.5.dist-info/RECORD ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/typer,sha256=UcjgIlQP2WhaSXX5wnMiCOWCEZVlW13RJ5s0Zgz7nZc,220
2
+ typer-0.12.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ typer-0.12.5.dist-info/METADATA,sha256=H4-wCdYSIVGVK2BG0Uo5WW_CbFtg3LcTgbddb8YWYpM,15670
4
+ typer-0.12.5.dist-info/RECORD,,
5
+ typer-0.12.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ typer-0.12.5.dist-info/WHEEL,sha256=rSwsxJWe3vzyR5HCwjWXQruDgschpei4h_giTm0dJVE,90
7
+ typer-0.12.5.dist-info/entry_points.txt,sha256=-ETju9aHkoyAeEB4v005yjs1EswpSbYeHxi4Y1n8pm4,42
8
+ typer-0.12.5.dist-info/licenses/LICENSE,sha256=WJks68-N-25AxOIRLtEhJsJDZm3KORKj14t-ysSFnUk,1086
9
+ typer/__init__.py,sha256=hDym0jC4ZAWOtb6RI-JOMfMRyzIzSroPxGDSGiOeQs8,1603
10
+ typer/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
11
+ typer/__pycache__/__init__.cpython-310.pyc,,
12
+ typer/__pycache__/__main__.cpython-310.pyc,,
13
+ typer/__pycache__/_completion_classes.cpython-310.pyc,,
14
+ typer/__pycache__/_completion_shared.cpython-310.pyc,,
15
+ typer/__pycache__/_typing.cpython-310.pyc,,
16
+ typer/__pycache__/cli.cpython-310.pyc,,
17
+ typer/__pycache__/colors.cpython-310.pyc,,
18
+ typer/__pycache__/completion.cpython-310.pyc,,
19
+ typer/__pycache__/core.cpython-310.pyc,,
20
+ typer/__pycache__/main.cpython-310.pyc,,
21
+ typer/__pycache__/models.cpython-310.pyc,,
22
+ typer/__pycache__/params.cpython-310.pyc,,
23
+ typer/__pycache__/rich_utils.cpython-310.pyc,,
24
+ typer/__pycache__/testing.cpython-310.pyc,,
25
+ typer/__pycache__/utils.cpython-310.pyc,,
26
+ typer/_completion_classes.py,sha256=FUbWj_PakY4yqeKOA3NBCVNHPinndy4GO5UkFLiL3vE,6721
27
+ typer/_completion_shared.py,sha256=3OzRdoyn_Z3uQ_JBcJBsQShv8eaogy36Yf1dhFlK-t4,8757
28
+ typer/_typing.py,sha256=7vt_zCpyS2VwQdzhlor-PhvSCJ6g18ZXGPI1MNEoxS0,17997
29
+ typer/cli.py,sha256=PHBnjaPYKplR2Ksk8LywA6gzO1DVpNX6O9R_bzz5EHU,9404
30
+ typer/colors.py,sha256=e42j8uB520hLpX5C_0fiR3OOoIFMbhO3ADZvv6hlAV8,430
31
+ typer/completion.py,sha256=fXEMvR_8qy1e_JNIvN4BMNzFTcifFJOGd2hUWNiSfSQ,4765
32
+ typer/core.py,sha256=HyKdn0unvpE_Y17Pa0I6KCZaOTvFsjaoczeH9FAMroQ,24682
33
+ typer/main.py,sha256=7994cNYcKviSj9dWCNAhXxOaS_22VuKiEVWmg2Piqyo,39846
34
+ typer/models.py,sha256=JL4x11rB-6CtOhpHOFWW13ZFcM02abzO9jiZt_qaSY8,15908
35
+ typer/params.py,sha256=kuEE01zsiIBPjkeyv9lFeXRsBPW3BN1-U6aqwbL6lPE,13787
36
+ typer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
37
+ typer/rich_utils.py,sha256=Gx3OFq7bS8vb0hKGQxOrCcawHFqZOzUt3baQz9G4Avw,24208
38
+ typer/testing.py,sha256=Mb_HqTkpPw24qsVYxCQrDJpjq_oOHlgqZpauWofxkq0,874
39
+ typer/utils.py,sha256=XESS5TnyP7ftYbUt0rUJajMVtqCQ7Ndzd8VHg3V9WaQ,7414
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/annotate_warns.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void AnnotateWarns(const std::shared_ptr<Graph>& graph);
9
+
10
+ } // namespace jit
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/autocast.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #pragma once
3
+
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ TORCH_API void Autocast(const std::shared_ptr<Graph>& graph);
10
+
11
+ TORCH_API bool setAutocastMode(bool value);
12
+ TORCH_API bool autocastEnabled();
13
+
14
+ } // namespace jit
15
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/bailout_graph.h ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ATen.h>
4
+ #include <ATen/core/ivalue.h>
5
+ #include <ATen/core/jit_type.h>
6
+ #include <ATen/core/stack.h>
7
+ #include <torch/csrc/Export.h>
8
+ #include <torch/csrc/jit/ir/ir.h>
9
+
10
+ #include <list>
11
+ #include <vector>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ // Replaces prim::Guard nodes with prim::BailOut nodes and
17
+ // computes sets of inputs needed to resume execution at
18
+ // bailout points
19
+ TORCH_API void InsertBailOuts(std::shared_ptr<Graph> graph);
20
+
21
+ // Builds a bailout graph into `target` (which is an empty graph)
22
+ // for a given bailout point `bailout_index`
23
+ // from the original graph `orig` (the original unoptimized graph)
24
+ // BailOut graphs allow Interpreter to resume
25
+ // execution of the (un/de)optimized graph (i.e.
26
+ // a graph that doesn't rely on any assumptions derived from
27
+ // on profiling information) from a given BailOut point
28
+ // should any of the assumptions fail for an actual input.
29
+ TORCH_API std::shared_ptr<Graph> BuildBailOutGraphFrom(
30
+ int64_t bailout_index,
31
+ const std::shared_ptr<Graph>& orig,
32
+ const std::shared_ptr<Graph>& target);
33
+ } // namespace jit
34
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_pooling.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void ConstantPooling(const std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ struct TORCH_API LinearBNParameters {
9
+ at::Tensor linear_w;
10
+ at::Tensor linear_b;
11
+ at::Tensor bn_rm;
12
+ at::Tensor bn_rv;
13
+ double bn_eps = 0.0;
14
+ at::Tensor bn_w;
15
+ at::Tensor bn_b;
16
+ };
17
+
18
+ /**
19
+ * Given the current weight and bias tensors of a Linear module and parameters
20
+ * of the BatchNorm module we're folding with, compute the updated values
21
+ * for the weight and bias.
22
+ *
23
+ * The function is basically copied from torch/nn/utils/fusion.py
24
+ */
25
+ TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedLinearWeightAndBias(
26
+ const LinearBNParameters& p);
27
+
28
+ } // namespace jit
29
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/freeze_module.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /** \brief This file defines freezing Torchscript module API.
2
+ *
3
+ * This API has python-binding and can be invoked directly or as a part of
4
+ * general optimization pipeline.
5
+ */
6
+ #pragma once
7
+
8
+ #include <torch/csrc/jit/api/module.h>
9
+ #include <torch/csrc/jit/ir/ir.h>
10
+
11
+ /** \brief Freeze Module, i.e., Assume all attributes are constants.
12
+ *
13
+ * Freezing module is a functionality that allows the JIT to internalize
14
+ * immutable attributes. Combined with inlining, the module is aggressively
15
+ * optimized and significant overhead is optimized away. The freezeModule API
16
+ * produces a cloned frozen module.
17
+ */
18
+
19
+ namespace torch {
20
+ namespace jit {
21
+
22
+ TORCH_API Module freeze_module(
23
+ const Module& module,
24
+ std::vector<std::string> preservedAttrs = std::vector<std::string>(),
25
+ bool freezeInterfaces = true,
26
+ bool preserveParameters = false);
27
+
28
+ // Clone-free version of freeze_module. This modifies the module inplace.
29
+ // Use this version to avoid extra memory usage incurred by cloning the module.
30
+ TORCH_API void freeze_module_inplace(
31
+ Module* module,
32
+ std::vector<std::string> preservedAttrs = std::vector<std::string>(),
33
+ bool freezeInterfaces = true,
34
+ bool preserveParameters = false);
35
+ } // namespace jit
36
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // Fuses Convolution -> Batchnorm into a single Convolution by
9
+ // folding batchnorm weights into conv weights.
10
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
11
+ TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr<Graph>& graph);
12
+
13
+ // Fuses Convolution -> Add/Sub into a single Convolution by
14
+ // folding add constant tensor into conv weights.
15
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
16
+ TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr<Graph>& graph);
17
+
18
+ // Fuses Convolution -> Mul/Div into a single Convolution by
19
+ // folding add constant tensor into conv weights.
20
+ // This pass only works on Frozen Graphs; otherwise it is a No-Op.
21
+ TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr<Graph>& graph);
22
+
23
+ } // namespace jit
24
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_graph_optimizations.h ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ /** \brief Runs a set of Optimizations that Optimize Frozen Graphs
6
+ *
7
+ * Currently this set of optimizations is:
8
+ * - FoldFrozenConvBatchnorm
9
+ * - FoldFrozenConvAddOrSub
10
+ * - FoldFrozenConvMulOrDiv
11
+ * - FoldFrozenLinearBatchnorm
12
+ */
13
+
14
+ namespace torch {
15
+ namespace jit {
16
+
17
+ TORCH_API void OptimizeFrozenGraph(
18
+ std::shared_ptr<Graph>& graph,
19
+ bool optimize_numerics = true);
20
+
21
+ } // namespace jit
22
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_relu.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+ TORCH_API void FuseAddRelu(script::Module& module);
9
+ TORCH_API void FuseAddRelu(std::shared_ptr<Graph>& graph);
10
+ } // namespace jit
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/inplace_check.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ TORCH_API void CheckInplace(std::shared_ptr<Graph>& graph);
9
+
10
+ }
11
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/integer_value_refinement.h ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // return true if graph is modified
9
+ TORCH_API bool RefineIntegerValues(const std::shared_ptr<Graph>& graph);
10
+
11
+ } // namespace jit
12
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/loop_unrolling.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ // return true if graph is modified
9
+ TORCH_API bool UnrollLoops(std::shared_ptr<Graph>& graph);
10
+
11
+ // Only unrolls constant loops. Will unroll them regardless of loop block size
12
+ TORCH_API bool UnrollConstantLoops(std::shared_ptr<Graph>& graph);
13
+
14
+ TORCH_API Node* PeelLoop(Node* n, size_t times);
15
+
16
+ // return true if graph is modified
17
+ TORCH_API bool PeelProfilingLoops(const std::shared_ptr<Graph>& graph);
18
+
19
+ struct TORCH_API LoopsPeeler {
20
+ LoopsPeeler(std::function<bool(Node* n)> callback, size_t num_iterations = 1)
21
+ : callback_(std::move(callback)), num_iterations_(num_iterations) {}
22
+
23
+ bool run(const std::shared_ptr<Graph>& graph);
24
+
25
+ private:
26
+ void collectLoop(Node* n);
27
+ void collectLoops(Block* block);
28
+ void peelLoops();
29
+
30
+ std::function<bool(Node* n)> callback_ = nullptr;
31
+ Node* in_loop_ = nullptr;
32
+ std::list<Node*> loops_to_peel_;
33
+ size_t num_iterations_ = 1;
34
+ };
35
+ } // namespace jit
36
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/metal_rewrite.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <torch/csrc/jit/api/module.h>
3
+ #include <torch/csrc/jit/ir/ir.h>
4
+ #include <string>
5
+ #include <vector>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+ TORCH_API void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph);
10
+ TORCH_API void metalInsertPrePackedOps(script::Module& module);
11
+ TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module);
12
+ TORCH_API void metalFoldPrePackingOps(script::Module& module);
13
+ TORCH_API script::Module metalOptimizeForMobile(
14
+ const script::Module& module,
15
+ const std::vector<std::string>& preserved_methods);
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/prepack_folding.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+
6
+ namespace torch {
7
+ namespace jit {
8
+
9
+ using PrePackingOpsFilterFn = std::function<bool(Node*)>;
10
+
11
+ void PrePackingOpsFolder(
12
+ script::Module& m,
13
+ const PrePackingOpsFilterFn& is_foldable_op,
14
+ const std::string& attr_prefix);
15
+
16
+ } // namespace jit
17
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/dedup_module_uses.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+
5
+ namespace torch {
6
+ namespace jit {
7
+
8
+ /** Recursively deduplicate multiple uses of the same module by
9
+ * creating an instance clone for each use of the module, which means
10
+ * the type will be the same as before and all the attributes will be
11
+ * copied, then we'll change the use of the original module to the use
12
+ * of cloned module in the Graph.
13
+ *
14
+ * This is done to ensure that modules can survive destructive passes
15
+ * without changing model behavior. For example, here:
16
+ *
17
+ * x = self.conv1(x)
18
+ * x = self.relu(x)
19
+ * x = self.conv2(x)
20
+ * x = self.relu(x)
21
+ *
22
+ * self.relu needs to be deduplicated for potential future destructive passes
23
+ * to work properly.
24
+ */
25
+ TORCH_API void DedupModuleUses(Module& module);
26
+
27
+ } // namespace jit
28
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/finalize.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /** \brief Backend specific pass to fuse dequantize - op - quantize calls
11
+ * as quantized_op calls.
12
+ *
13
+ * Right now this is a fusion for fbgemm backend and only works for quantized
14
+ * conv op, we'll extend to more ops and more backends in the future.
15
+ *
16
+ * Currently supported fusion:
17
+ * q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)),
18
+ * prepack(to_nhwc(w)),
19
+ * prepack(to_nhwc(b))))
20
+ *
21
+ * q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)),
22
+ * prepack(to_nhwc(w)),
23
+ * prepack(to_nhwc(b))))
24
+ *
25
+ * \param graph the graph we want to apply fusion
26
+ */
27
+ TORCH_API void QuantFusion(
28
+ std::shared_ptr<Graph>& graph,
29
+ QuantType quant_type = QuantType::STATIC);
30
+
31
+ /** \brief Insert prepack and unpack function in graph
32
+ * We want add pack/unpack functions for quantized weight because later we want
33
+ * to fold the packed weight as an attribute of the module, in order to reduce
34
+ * the cost of packing the weight on the fly in quantized models.
35
+ *
36
+ * Each quantized op has it's corresponding prepack/unpack function,
37
+ * right now, we only need to do prepack/unpack for quantized::linear
38
+ * and quantized::conv2d.
39
+ */
40
+ TORCH_API void InsertPrepackUnpack(std::shared_ptr<Graph>& graph);
41
+
42
+ /** \brief Insert pack and unpack function in all graphs
43
+ * of module
44
+ *
45
+ * Go through graphs of all the methods of all child modules
46
+ * and call InsertPrepackUnpack on the graph.
47
+ */
48
+ TORCH_API void InsertPrepackUnpack(Module& module);
49
+
50
+ TORCH_API script::Module Finalize(
51
+ script::Module& module,
52
+ QuantType quant_type = QuantType::STATIC,
53
+ const std::vector<std::string>& preserved_attrs =
54
+ std::vector<std::string>());
55
+
56
+ TORCH_API void FoldQuantizedPrepackingOps(Module& module);
57
+
58
+ TORCH_API Module FinalizeOnDevicePTQ(
59
+ Module& module,
60
+ QuantType quant_type,
61
+ const std::string& method_name);
62
+ } // namespace jit
63
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/insert_quant_dequant.h ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <torch/csrc/jit/api/module.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/passes/quantization/quantization_type.h>
6
+
7
+ namespace torch {
8
+ namespace jit {
9
+
10
+ /** Replicate quantize node for prim::If blocks, so that we can match
11
+ * quantization patterns in prim::If blocks
12
+ */
13
+ TORCH_API void ReplicateQuant(std::shared_ptr<Graph>& graph);
14
+
15
+ /** Replicate dequantize node for each use, so that we can match
16
+ * quantization patterns
17
+ */
18
+ TORCH_API void ReplicateDeQuant(std::shared_ptr<Graph>& graph);
19
+
20
+ /** \brief Insert quantize - dequantize calls to the Tensors
21
+ * that are observed in insert_observers pass
22
+ *
23
+ * For each Tensor that is observed, get the observer module and call
24
+ * calculate_qparam on the observer module to get quantization parameters
25
+ * and add quantize - int_repr - dequantize function calls using these
26
+ * parameters we also have special handling for quantizing "bias" right now.
27
+ *
28
+ * \param module the input module
29
+ * \param method_name the method we want to insert quantization calls for
30
+ */
31
+ TORCH_API Module InsertQuantDeQuant(
32
+ Module& module,
33
+ const std::string& method_name,
34
+ bool inplace,
35
+ bool debug,
36
+ QuantType quant_type = QuantType::STATIC);
37
+
38
+ TORCH_API Module InsertQuantDeQuantOnDevicePTQ(
39
+ Module& module,
40
+ const std::string& method_name,
41
+ bool inplace,
42
+ bool debug,
43
+ QuantType quant_type = QuantType::STATIC);
44
+
45
+ } // namespace jit
46
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_patterns.h ADDED
@@ -0,0 +1,1272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/util/irange.h>
4
+ #include <torch/csrc/jit/ir/ir.h>
5
+ #include <torch/csrc/jit/ir/subgraph_matcher.h>
6
+ #include <torch/csrc/jit/jit_log.h>
7
+ #include <torch/csrc/jit/passes/quantization/helper.h>
8
+ #include <torch/csrc/jit/passes/subgraph_rewrite.h>
9
+ #include <string>
10
+ #include <unordered_map>
11
+ #include <utility>
12
+
13
+ namespace torch {
14
+ namespace jit {
15
+
16
+ struct QuantFusionInfo {
17
+ std::string quantized_op_name;
18
+ std::string pattern;
19
+ std::string replacement;
20
+ std::vector<MatchFilter> filters = {};
21
+ };
22
+
23
+ namespace {
24
+ std::string getExtraArgList(std::vector<std::string> extra_args) {
25
+ return std::accumulate(
26
+ extra_args.begin(),
27
+ extra_args.end(),
28
+ std::string(),
29
+ [](std::string acc, const std::string& arg) { return acc + ", " + arg; });
30
+ }
31
+
32
+ // Get the pattern we want to replace the match with
33
+ std::string getAtenOpPattern(
34
+ const std::string& graph_header,
35
+ const std::string& op_name,
36
+ const std::vector<std::string>& extra_op_args,
37
+ bool scalar_args = false) {
38
+ std::vector<std::string> _extra_op_args = extra_op_args;
39
+ std::string aten_op_pattern = graph_header;
40
+ if (scalar_args) {
41
+ for (const auto& extra_arg : _extra_op_args) {
42
+ aten_op_pattern
43
+ .append(R"(
44
+ )")
45
+ .append(extra_arg)
46
+ .append("_scalar = aten::item(")
47
+ .append(extra_arg)
48
+ .append(")");
49
+ }
50
+
51
+ for (auto& _extra_op_arg : _extra_op_args) {
52
+ _extra_op_arg.append("_scalar");
53
+ }
54
+ }
55
+ const auto& extra_op_arg_list = getExtraArgList(std::move(_extra_op_args));
56
+ aten_op_pattern += R"(
57
+ %r = )";
58
+ aten_op_pattern += op_name + "(" + "%a_quant" + extra_op_arg_list + ")";
59
+ aten_op_pattern += R"(
60
+ return (%r) )";
61
+ return aten_op_pattern;
62
+ }
63
+
64
+ // generate ops for quantize pattern for a scalar value
65
+ std::string getQuantizeForScalar(const std::string& value) {
66
+ // 6 is `torch.float` ScalarType, we are creating a float scalar
67
+ // tensor from a scalar value
68
+ std::string quantize_pattern = R"(
69
+ )" +
70
+ value + "_float_scalar_type : int = prim::Constant[value=6]()";
71
+ quantize_pattern += R"(
72
+ )" +
73
+ value + "_none : None = prim::Constant()";
74
+ quantize_pattern += R"(
75
+ )" +
76
+ value + "_tensor : Tensor = aten::scalar_tensor(" + value + ", " + value +
77
+ "_float_scalar_type";
78
+ for (const auto i : c10::irange(3)) {
79
+ (void)i; // Suppress unused variable warning
80
+ quantize_pattern += ", " + value + "_none";
81
+ }
82
+ quantize_pattern += ")";
83
+ quantize_pattern +=
84
+ R"(
85
+ )" +
86
+ value + "_quant = aten::quantize_per_tensor(" + value + "_tensor" +
87
+ getExtraArgList(
88
+ {value + "_scale", value + "_zero_point", value + "_dtype"}) +
89
+ ")";
90
+ return quantize_pattern;
91
+ }
92
+
93
+ std::string getDequantize(const std::string& value) {
94
+ return R"(
95
+ )" +
96
+ value + "_dequant = aten::dequantize(" + value + "_quant)";
97
+ }
98
+
99
+ std::string getItem(const std::string& value) {
100
+ return R"(
101
+ )" +
102
+ value + "_scalar : float = aten::item(" + value + "_dequant)";
103
+ }
104
+
105
+ // Patterns for the ops that inherit parameters from input
106
+ std::string getInputTensorQParamOpPattern(
107
+ const std::string& op_name,
108
+ const std::vector<std::string>& extra_op_args) {
109
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
110
+ std::string op_pattern = "graph(%a_quant" + extra_op_arg_list + "):" + R"(
111
+ %a_dequant = aten::dequantize(%a_quant)
112
+ %r = )" +
113
+ op_name + "(" + "%a_dequant" + extra_op_arg_list + ")" + R"(
114
+ %r_scale : float = aten::q_scale(%a_quant)
115
+ %r_zero_point : int = aten::q_zero_point(%a_quant)
116
+ %r_dtype : int = prim::dtype(%a_quant)
117
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
118
+ return (%r_quant) )";
119
+ return op_pattern;
120
+ }
121
+
122
+ // QuantFusionInfo for the ops that inherit parameters from input
123
+ QuantFusionInfo getInputTensorQParamOpFusionInfo(
124
+ const std::string& op_name,
125
+ const std::vector<std::string>& extra_op_args) {
126
+ std::string op_pattern =
127
+ getInputTensorQParamOpPattern(op_name, extra_op_args);
128
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
129
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
130
+ std::string op_replacement =
131
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
132
+
133
+ return {op_name, std::move(op_pattern), std::move(op_replacement)};
134
+ }
135
+
136
+ // quant fusion for ops like `quantized::add_scalar`, `quantized::mul_scalar`
137
+ QuantFusionInfo getBinaryOpScalarFusionInfo(
138
+ const std::string& op_name,
139
+ const std::vector<std::string>& extra_op_args,
140
+ const std::string& quantized_op_name,
141
+ const std::vector<std::string>& extra_quantized_op_args,
142
+ const std::vector<MatchFilter>& filters = {}) {
143
+ std::string op_pattern =
144
+ getInputTensorQParamOpPattern(op_name, extra_op_args);
145
+
146
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
147
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
148
+ std::string op_replacement = getAtenOpPattern(
149
+ graph_header, quantized_op_name, extra_quantized_op_args);
150
+
151
+ return {op_name, std::move(op_pattern), std::move(op_replacement), filters};
152
+ }
153
+
154
+ QuantFusionInfo getClampOpFusionInfo(
155
+ const std::string& op_name,
156
+ const std::vector<std::string>& extra_op_args) {
157
+ std::vector<std::string> header_args = extra_op_args;
158
+ std::vector<std::string> input_qparams = {"_scale", "_zero_point", "_dtype"};
159
+ for (const auto& arg : extra_op_args) {
160
+ for (const auto& qparam : input_qparams) {
161
+ header_args.push_back(arg + qparam);
162
+ }
163
+ }
164
+ for (const auto& qparam : input_qparams) {
165
+ header_args.push_back("%r" + qparam);
166
+ }
167
+ const auto& extra_header_arg_list = getExtraArgList(std::move(header_args));
168
+ std::string graph_header = "graph(%a_quant" + extra_header_arg_list + "):";
169
+ std::string op_pattern = graph_header;
170
+ for (const auto& arg : extra_op_args) {
171
+ op_pattern += getQuantizeForScalar(arg);
172
+ op_pattern += getDequantize(arg);
173
+ op_pattern += getItem(arg);
174
+ }
175
+ op_pattern += getDequantize("%a");
176
+ op_pattern += R"(
177
+ %r = )";
178
+ std::vector<std::string> scalar_extra_args;
179
+ scalar_extra_args.reserve(extra_op_args.size());
180
+ for (const auto& arg : extra_op_args) {
181
+ scalar_extra_args.push_back(arg + "_scalar");
182
+ }
183
+ op_pattern += op_name + "(" + "%a_dequant" +
184
+ getExtraArgList(std::move(scalar_extra_args)) + ")";
185
+ // IR pattern common to all ops that inherit qparam from input
186
+ op_pattern += R"(
187
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
188
+ return (%r_quant) )";
189
+
190
+ std::string aten_op_pattern =
191
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
192
+
193
+ return {op_name, std::move(op_pattern), std::move(aten_op_pattern)};
194
+ }
195
+
196
+ // Patterns for the ops that has fixed quantization parameters
197
+ QuantFusionInfo getFixedQParamOpFusionInfo(
198
+ const std::string& op_name,
199
+ const std::vector<std::string>& extra_op_args,
200
+ bool is_symmetric) {
201
+ const auto& extra_op_arg_list = getExtraArgList(extra_op_args);
202
+ std::string graph_header = "graph(%a_quant" + extra_op_arg_list + "):";
203
+ std::string op_pattern = graph_header;
204
+ op_pattern += R"(
205
+ %a_dequant = aten::dequantize(%a_quant)
206
+ %r = )";
207
+ op_pattern += op_name + "(" + "%a_dequant" + extra_op_arg_list + ")";
208
+ // IR pattern common to all ops with fixed quantization parameters for
209
+ // asymetric quantization
210
+ std::string asym_fixed_qparam_op_suffix = R"(
211
+ %r_scale : float = prim::Constant[value=0.00390625]()
212
+ %r_zero_point : int = prim::Constant[value=0]()
213
+ %r_dtype : int = prim::Constant[value=13]()
214
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
215
+ return (%r_quant) )";
216
+
217
+ std::string sym_fixed_qparam_op_suffix = R"(
218
+ %r_scale : float = prim::Constant[value=0.0078125]()
219
+ %r_zero_point : int = prim::Constant[value=128]()
220
+ %r_dtype : int = prim::Constant[value=13]()
221
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
222
+ return (%r_quant) )";
223
+ op_pattern +=
224
+ is_symmetric ? sym_fixed_qparam_op_suffix : asym_fixed_qparam_op_suffix;
225
+
226
+ std::string aten_op_pattern =
227
+ getAtenOpPattern(graph_header, op_name, extra_op_args);
228
+
229
+ return {op_name, std::move(op_pattern), std::move(aten_op_pattern)};
230
+ }
231
+
232
+ // filter that checks %b_scalar is a scalar
233
+ bool input_b_is_scalar(
234
+ const Match& match,
235
+ const std::unordered_map<std::string, Value*>& vmap) {
236
+ const auto& match_vmap = match.values_map;
237
+ auto b_scalar = match_vmap.at(vmap.at("b_scalar"));
238
+ return isScalar(b_scalar);
239
+ }
240
+
241
+ // Patterns for ops that require observation for output quantization parameters
242
+ // Example:
243
+ //
244
+ // before fusion:
245
+ //
246
+ // graph(%a_quant, %r_scale, %r_zero_point, %r_dtype):
247
+ // %a_dequant = aten::dequantize(%a_quant)
248
+ // %r = {op_name}(%a_dequant, {extra_args})
249
+ // %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point,
250
+ // %r_dtype) return (%r_quant)
251
+ //
252
+ // after fusion:
253
+ //
254
+ // graph(%a_quant, %r_scale, %r_zero_point, %r_dtype):
255
+ // %r_quant = {quantized_op_name}(%a_quant, {extra_args}, %r_scale,
256
+ // %r_zero_point) return (%r_quant)
257
+ QuantFusionInfo getObservedQParamOpFusionInfo(
258
+ const std::string& fp_op_name,
259
+ const std::string& q_op_name,
260
+ const std::vector<std::string>& fp_extra_args,
261
+ const std::vector<std::string>& q_extra_args) {
262
+ const auto& fp_extra_arg_list = getExtraArgList(fp_extra_args);
263
+ const auto& q_extra_arg_list = getExtraArgList(q_extra_args);
264
+
265
+ std::string op_pattern = "graph(%a_quant" + fp_extra_arg_list +
266
+ ", %r_scale, %r_zero_point, %r_dtype):" + R"(
267
+ %a_dequant = aten::dequantize(%a_quant)
268
+ %r = )" +
269
+ fp_op_name + "(" + "%a_dequant" + fp_extra_arg_list + ")" + R"(
270
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
271
+ return (%r_quant) )";
272
+
273
+ std::string aten_op_pattern = "graph(%a_quant" + fp_extra_arg_list +
274
+ ", %r_scale, %r_zero_point, %r_dtype):" + R"(
275
+ %r_quant = )" +
276
+ q_op_name + "(%a_quant" + q_extra_arg_list +
277
+ ", %r_scale, %r_zero_point)" + R"(
278
+ return (%r_quant) )";
279
+
280
+ return {q_op_name, std::move(op_pattern), std::move(aten_op_pattern)};
281
+ }
282
+
283
+ } // namespace
284
+
285
+ static std::vector<QuantFusionInfo> quant_fusion_pattern_and_replacements() {
286
+ // aten::conv1d
287
+ std::string conv1d = R"(
288
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
289
+ %a_dequant = aten::dequantize(%a_quant)
290
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
291
+ %w_dequant = aten::dequantize(%w_quant)
292
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
293
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
294
+ return (%r_quant) )";
295
+
296
+ // aten::conv1d - aten::relu
297
+ std::string conv1d_relu = R"(
298
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
299
+ %a_dequant = aten::dequantize(%a_quant)
300
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
301
+ %w_dequant = aten::dequantize(%w_quant)
302
+ %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
303
+ %r = aten::relu(%conv_out)
304
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
305
+ return (%r_quant) )";
306
+
307
+ // aten::conv1d - aten::relu_
308
+ std::string conv1d_inplace_relu = R"(
309
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
310
+ %a_dequant = aten::dequantize(%a_quant)
311
+ %w_quant : Tensor, %b : Tensor? = quantized::conv1d_unpack(%packed_params)
312
+ %w_dequant = aten::dequantize(%w_quant)
313
+ %conv_out = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
314
+ %r = aten::relu_(%conv_out)
315
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
316
+ return (%r_quant) )";
317
+
318
+ // quantized::conv1d
319
+ std::string quantized_conv1d = R"(
320
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
321
+ %r_quant = quantized::conv1d(%a_quant, %packed_params, %r_scale, %r_zero_point)
322
+ return (%r_quant) )";
323
+
324
+ // quantized::conv1d_relu
325
+ std::string quantized_conv1d_relu = R"(
326
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
327
+ %r_quant = quantized::conv1d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
328
+ return (%r_quant) )";
329
+
330
+ // aten::conv2d
331
+ std::string conv2d = R"(
332
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
333
+ %a_dequant = aten::dequantize(%a_quant)
334
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
335
+ %w_dequant = aten::dequantize(%w_quant)
336
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
337
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
338
+ return (%r_quant) )";
339
+
340
+ // aten::conv2d - aten::relu
341
+ std::string conv2d_relu = R"(
342
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
343
+ %a_dequant = aten::dequantize(%a_quant)
344
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
345
+ %w_dequant = aten::dequantize(%w_quant)
346
+ %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
347
+ %r = aten::relu(%conv_out)
348
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
349
+ return (%r_quant) )";
350
+
351
+ // aten::conv2d - aten::relu_
352
+ std::string conv2d_inplace_relu = R"(
353
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
354
+ %a_dequant = aten::dequantize(%a_quant)
355
+ %w_quant : Tensor, %b : Tensor? = quantized::conv2d_unpack(%packed_params)
356
+ %w_dequant = aten::dequantize(%w_quant)
357
+ %conv_out = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
358
+ %r = aten::relu_(%conv_out)
359
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
360
+ return (%r_quant) )";
361
+
362
+ // quantized::conv2d
363
+ std::string quantized_conv2d = R"(
364
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
365
+ %r_quant = quantized::conv2d(%a_quant, %packed_params, %r_scale, %r_zero_point)
366
+ return (%r_quant) )";
367
+
368
+ // quantized::conv2d_relu
369
+ std::string quantized_conv2d_relu = R"(
370
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
371
+ %r_quant = quantized::conv2d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
372
+ return (%r_quant) )";
373
+
374
+ // aten::conv3d
375
+ std::string conv3d = R"(
376
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
377
+ %a_dequant = aten::dequantize(%a_quant)
378
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
379
+ %w_dequant = aten::dequantize(%w_quant)
380
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
381
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
382
+ return (%r_quant) )";
383
+
384
+ // aten::conv3d - aten::relu
385
+ std::string conv3d_relu = R"(
386
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
387
+ %a_dequant = aten::dequantize(%a_quant)
388
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
389
+ %w_dequant = aten::dequantize(%w_quant)
390
+ %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
391
+ %r = aten::relu(%conv_out)
392
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
393
+ return (%r_quant) )";
394
+
395
+ // aten::conv3d - aten::relu_
396
+ std::string conv3d_inplace_relu = R"(
397
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
398
+ %a_dequant = aten::dequantize(%a_quant)
399
+ %w_quant : Tensor, %b : Tensor? = quantized::conv3d_unpack(%packed_params)
400
+ %w_dequant = aten::dequantize(%w_quant)
401
+ %conv_out = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
402
+ %r = aten::relu_(%conv_out)
403
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
404
+ return (%r_quant) )";
405
+
406
+ // quantized::conv3d
407
+ std::string quantized_conv3d = R"(
408
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
409
+ %r_quant = quantized::conv3d(%a_quant, %packed_params, %r_scale, %r_zero_point)
410
+ return (%r_quant) )";
411
+
412
+ // quantized::conv3d_relu
413
+ std::string quantized_conv3d_relu = R"(
414
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %dilation, %groups):
415
+ %r_quant = quantized::conv3d_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
416
+ return (%r_quant) )";
417
+
418
+ // aten::conv_transpose1d
419
+ std::string conv_transpose1d = R"(
420
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
421
+ %a_dequant = aten::dequantize(%a_quant)
422
+ %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose1d_unpack(%packed_params)
423
+ %w_dequant = aten::dequantize(%w_quant)
424
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
425
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
426
+ return (%r_quant) )";
427
+
428
+ // quantized::conv_transpose1d
429
+ std::string quantized_conv_transpose1d = R"(
430
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
431
+ %r_quant = quantized::conv_transpose1d(%a_quant, %packed_params, %r_scale, %r_zero_point)
432
+ return (%r_quant) )";
433
+
434
+ // aten::conv_transpose2d
435
+ std::string conv_transpose2d = R"(
436
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
437
+ %a_dequant = aten::dequantize(%a_quant)
438
+ %w_quant : Tensor, %b : Tensor? = quantized::conv_transpose2d_unpack(%packed_params)
439
+ %w_dequant = aten::dequantize(%w_quant)
440
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
441
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
442
+ return (%r_quant) )";
443
+
444
+ // quantized::conv_transpose1d
445
+ std::string quantized_conv_transpose2d = R"(
446
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype, %stride, %padding, %output_padding, %groups, %dilation):
447
+ %r_quant = quantized::conv_transpose2d(%a_quant, %packed_params, %r_scale, %r_zero_point)
448
+ return (%r_quant) )";
449
+
450
+ std::string add_relu = R"(
451
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
452
+ %a_dequant = aten::dequantize(%a_quant)
453
+ %b_dequant = aten::dequantize(%b_quant)
454
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
455
+ %r_relu = aten::relu(%r_add)
456
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
457
+ return (%r) )";
458
+
459
+ std::string add_inplace_relu = R"(
460
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
461
+ %a_dequant = aten::dequantize(%a_quant)
462
+ %b_dequant = aten::dequantize(%b_quant)
463
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
464
+ %r_relu = aten::relu_(%r_add)
465
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
466
+ return (%r) )";
467
+
468
+ std::string inplace_add_relu = R"(
469
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
470
+ %a_dequant = aten::dequantize(%a_quant)
471
+ %b_dequant = aten::dequantize(%b_quant)
472
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
473
+ %r_relu = aten::relu(%r_add)
474
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
475
+ return (%r) )";
476
+
477
+ std::string inplace_add_inplace_relu = R"(
478
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
479
+ %a_dequant = aten::dequantize(%a_quant)
480
+ %b_dequant = aten::dequantize(%b_quant)
481
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
482
+ %r_relu = aten::relu_(%r_add)
483
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
484
+ return (%r) )";
485
+
486
+ std::string quantized_add_relu = R"(
487
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
488
+ %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point)
489
+ return (%r) )";
490
+
491
+ // aten::linear
492
+ std::string linear = R"(
493
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
494
+ %a_dequant = aten::dequantize(%a_quant)
495
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
496
+ %w_dequant = aten::dequantize(%w_quant)
497
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
498
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
499
+ return (%r_quant) )";
500
+
501
+ std::string linear_relu = R"(
502
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
503
+ %a_dequant = aten::dequantize(%a_quant)
504
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
505
+ %w_dequant = aten::dequantize(%w_quant)
506
+ %linear_out = aten::linear(%a_dequant, %w_dequant, %b)
507
+ %r = aten::relu(%linear_out)
508
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
509
+ return (%r_quant) )";
510
+
511
+ std::string linear_inplace_relu = R"(
512
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
513
+ %a_dequant = aten::dequantize(%a_quant)
514
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
515
+ %w_dequant = aten::dequantize(%w_quant)
516
+ %linear_out = aten::linear(%a_dequant, %w_dequant, %b)
517
+ %r = aten::relu_(%linear_out)
518
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
519
+ return (%r_quant) )";
520
+
521
+ // quantized::linear
522
+ std::string quantized_linear = R"(
523
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
524
+ %r = quantized::linear(%a_quant, %packed_params, %r_scale, %r_zero_point)
525
+ return (%r) )";
526
+
527
+ std::string quantized_linear_relu = R"(
528
+ graph(%a_quant, %packed_params, %r_scale, %r_zero_point, %r_dtype):
529
+ %r = quantized::linear_relu(%a_quant, %packed_params, %r_scale, %r_zero_point)
530
+ return (%r) )";
531
+
532
+ std::string cat = R"(
533
+ graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype):
534
+ %input_dequant = aten::dequantize(%input_quant)
535
+ %r = aten::cat(%input_dequant, %dim)
536
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
537
+ return (%r_quant) )";
538
+
539
+ std::string quantized_cat = R"(
540
+ graph(%input_quant, %dim, %r_scale, %r_zero_point, %r_dtype):
541
+ %r_quant = quantized::cat(%input_quant, %dim, %r_scale, %r_zero_point)
542
+ return (%r_quant) )";
543
+
544
+ // aten::add
545
+ std::string add = R"(
546
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
547
+ %a_dequant = aten::dequantize(%a_quant)
548
+ %b_dequant = aten::dequantize(%b_quant)
549
+ %r_add = aten::add(%a_dequant, %b_dequant, %alpha)
550
+ %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype)
551
+ return (%r) )";
552
+
553
+ // TODO: add %dtype after when https://github.com/pytorch/pytorch/issues/34351
554
+ // is fixed
555
+ // quantized::add
556
+ std::string quantized_add = R"(
557
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
558
+ %r = quantized::add(%a_quant, %b_quant, %scale, %zero_point)
559
+ return (%r) )";
560
+
561
+ // aten::add_
562
+ std::string inplace_add = R"(
563
+ graph(%a_quant, %b_quant, %alpha, %scale, %zero_point, %dtype):
564
+ %a_dequant = aten::dequantize(%a_quant)
565
+ %b_dequant = aten::dequantize(%b_quant)
566
+ %r_add = aten::add_(%a_dequant, %b_dequant, %alpha)
567
+ %r = aten::quantize_per_tensor(%r_add, %scale, %zero_point, %dtype)
568
+ return (%r) )";
569
+
570
+ auto add_scalar = getBinaryOpScalarFusionInfo(
571
+ "aten::add",
572
+ {"%b_scalar", "%alpha"},
573
+ "quantized::add_scalar",
574
+ {"%b_scalar"},
575
+ {aten_add_alpha_is_one, input_b_is_scalar});
576
+
577
+ auto add_scalar_out = getBinaryOpScalarFusionInfo(
578
+ "aten::add_",
579
+ {"%b_scalar", "%alpha"},
580
+ "quantized::add_scalar_out",
581
+ {"%b_scalar", "%a_quant"},
582
+ {aten_add_alpha_is_one, input_b_is_scalar});
583
+
584
+ // quantized::add_scalar_relu -- fusing quantized::add_scalar
585
+ // and aten::relu
586
+ auto quantized_add_scalar_relu_pattern = R"(
587
+ graph(%a_quant, %b_scalar):
588
+ %r_add = quantized::add_scalar(%a_quant, %b_scalar)
589
+ %r = aten::relu(%r_add)
590
+ return (%r) )";
591
+
592
+ auto quantized_add_scalar_inplace_relu_pattern = R"(
593
+ graph(%a_quant, %b_scalar):
594
+ %r_add = quantized::add_scalar(%a_quant, %b_scalar)
595
+ %r = aten::relu_(%r_add)
596
+ return (%r) )";
597
+
598
+ auto quantized_add_scalar_relu_replacement = R"(
599
+ graph(%a_quant, %b_scalar):
600
+ %r = quantized::add_scalar_relu(%a_quant, %b_scalar)
601
+ return (%r) )";
602
+
603
+ // quantized::add_scalar_relu_out -- fusing quantized::add_scalarOut
604
+ // and aten::relu
605
+ auto quantized_add_scalar_relu_out_pattern = R"(
606
+ graph(%a_quant, %b_scalar):
607
+ %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant)
608
+ %r = aten::relu(%r_add)
609
+ return (%r) )";
610
+
611
+ auto quantized_add_scalar_inplace_relu_out_pattern = R"(
612
+ graph(%a_quant, %b_scalar):
613
+ %r_add = quantized::add_scalar_out(%a_quant, %b_scalar, %a_quant)
614
+ %r = aten::relu_(%r_add)
615
+ return (%r) )";
616
+
617
+ auto quantized_add_scalar_relu_out_replacement = R"(
618
+ graph(%a_quant, %b_scalar):
619
+ %r = quantized::add_scalar_relu_out(%a_quant, %b_scalar, %a_quant)
620
+ return (%r) )";
621
+
622
+ // quantized::batch_norm
623
+ std::string batch_norm = R"(
624
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
625
+ %a_dequant = aten::dequantize(%a_quant)
626
+ %r_bn = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
627
+ %r = aten::quantize_per_tensor(%r_bn, %scale, %zero_point, %scalar_type)
628
+ return (%r) )";
629
+ std::string quantized_batch_norm = R"(
630
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
631
+ %r = quantized::batch_norm(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point)
632
+ return (%r) )";
633
+
634
+ std::string batch_norm_relu = R"(
635
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
636
+ %a_dequant = aten::dequantize(%a_quant)
637
+ %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
638
+ %relu = aten::relu(%bn_out)
639
+ %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type)
640
+ return (%r) )";
641
+ std::string batch_norm_inplace_relu = R"(
642
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
643
+ %a_dequant = aten::dequantize(%a_quant)
644
+ %bn_out = aten::batch_norm(%a_dequant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7)
645
+ %relu = aten::relu_(%bn_out)
646
+ %r = aten::quantize_per_tensor(%relu, %scale, %zero_point, %scalar_type)
647
+ return (%r) )";
648
+
649
+ std::string quantized_batch_norm_relu = R"(
650
+ graph(%a_quant, %weight, %bias, %mean, %var, %training, %eaf, %eps, %7, %scale, %zero_point, %scalar_type):
651
+ %r = quantized::batch_norm_relu(%a_quant, %weight, %bias, %mean, %var, %eps, %scale, %zero_point)
652
+ return (%r) )";
653
+
654
+ // aten::mul
655
+ std::string mul = R"(
656
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
657
+ %a_dequant = aten::dequantize(%a_quant)
658
+ %b_dequant = aten::dequantize(%b_quant)
659
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
660
+ %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype)
661
+ return (%r) )";
662
+
663
+ // aten::mul_
664
+ std::string inplace_mul = R"(
665
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
666
+ %a_dequant = aten::dequantize(%a_quant)
667
+ %b_dequant = aten::dequantize(%b_quant)
668
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
669
+ %r = aten::quantize_per_tensor(%r_mul, %scale, %zero_point, %dtype)
670
+ return (%r) )";
671
+
672
+ // quantized::mul
673
+ std::string quantized_mul = R"(
674
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
675
+ %r = quantized::mul(%a_quant, %b_quant, %scale, %zero_point)
676
+ return (%r) )";
677
+
678
+ auto mul_scalar = getBinaryOpScalarFusionInfo(
679
+ "aten::mul",
680
+ {"%b_scalar"},
681
+ "quantized::mul_scalar",
682
+ {"%b_scalar"},
683
+ {input_b_is_scalar});
684
+
685
+ auto mul_scalar_out = getBinaryOpScalarFusionInfo(
686
+ "aten::mul_",
687
+ {"%b_scalar"},
688
+ "quantized::mul_scalar_out",
689
+ {"%b_scalar", "%a_quant"},
690
+ {input_b_is_scalar});
691
+
692
+ // quantized::mul_relu
693
+ std::string mul_relu = R"(
694
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
695
+ %a_dequant = aten::dequantize(%a_quant)
696
+ %b_dequant = aten::dequantize(%b_quant)
697
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
698
+ %r_relu = aten::relu(%r_mul)
699
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
700
+ return (%r) )";
701
+
702
+ std::string mul_inplace_relu = R"(
703
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
704
+ %a_dequant = aten::dequantize(%a_quant)
705
+ %b_dequant = aten::dequantize(%b_quant)
706
+ %r_mul = aten::mul(%a_dequant, %b_dequant)
707
+ %r_relu = aten::relu_(%r_mul)
708
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
709
+ return (%r) )";
710
+
711
+ std::string inplace_mul_relu = R"(
712
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
713
+ %a_dequant = aten::dequantize(%a_quant)
714
+ %b_dequant = aten::dequantize(%b_quant)
715
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
716
+ %r_relu = aten::relu(%r_mul)
717
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
718
+ return (%r) )";
719
+
720
+ std::string inplace_mul_inplace_relu = R"(
721
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
722
+ %a_dequant = aten::dequantize(%a_quant)
723
+ %b_dequant = aten::dequantize(%b_quant)
724
+ %r_mul = aten::mul_(%a_dequant, %b_dequant)
725
+ %r_relu = aten::relu_(%r_mul)
726
+ %r = aten::quantize_per_tensor(%r_relu, %scale, %zero_point, %dtype)
727
+ return (%r) )";
728
+
729
+ std::string quantized_mul_relu = R"(
730
+ graph(%a_quant, %b_quant, %scale, %zero_point, %dtype):
731
+ %r = quantized::mul_relu(%a_quant, %b_quant, %scale, %zero_point)
732
+ return (%r) )";
733
+
734
+ // quantized::mul_scalar_relu -- fusing quantized::mul_scalar
735
+ // and aten::relu
736
+ auto quantized_mul_scalar_relu_pattern = R"(
737
+ graph(%a_quant, %b_scalar):
738
+ %r_mul = quantized::mul_scalar(%a_quant, %b_scalar)
739
+ %r = aten::relu(%r_mul)
740
+ return (%r) )";
741
+
742
+ auto quantized_mul_scalar_inplace_relu_pattern = R"(
743
+ graph(%a_quant, %b_scalar):
744
+ %r_mul = quantized::mul_scalar(%a_quant, %b_scalar)
745
+ %r = aten::relu_(%r_mul)
746
+ return (%r) )";
747
+
748
+ auto quantized_mul_scalar_relu_replacement = R"(
749
+ graph(%a_quant, %b_scalar):
750
+ %r = quantized::mul_scalar_relu(%a_quant, %b_scalar)
751
+ return (%r) )";
752
+
753
+ // quantized::mul_scalar_relu_out -- fusing quantized::mul_scalarOut
754
+ // and aten::relu
755
+ auto quantized_mul_scalar_relu_out_pattern = R"(
756
+ graph(%a_quant, %b_scalar):
757
+ %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant)
758
+ %r = aten::relu(%r_mul)
759
+ return (%r) )";
760
+
761
+ auto quantized_mul_scalar_inplace_relu_out_pattern = R"(
762
+ graph(%a_quant, %b_scalar):
763
+ %r_mul = quantized::mul_scalar_out(%a_quant, %b_scalar, %a_quant)
764
+ %r = aten::relu_(%r_mul)
765
+ return (%r) )";
766
+
767
+ auto quantized_mul_scalar_relu_out_replacement = R"(
768
+ graph(%a_quant, %b_scalar):
769
+ %r = quantized::mul_scalar_relu_out(%a_quant, %b_scalar, %a_quant)
770
+ return (%r) )";
771
+
772
+ // quantized::elu
773
+ std::string elu = R"(
774
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
775
+ %a_dequant = aten::dequantize(%a_quant)
776
+ %r = aten::elu(%a_dequant, %alpha, %scale, %input_scale)
777
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
778
+ return (%r_quant) )";
779
+
780
+ std::string quantized_elu = R"(
781
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
782
+ %r_quant = quantized::elu(%a_quant, %r_scale, %r_zero_point, %alpha, %scale, %input_scale)
783
+ return (%r_quant) )";
784
+
785
+ std::string elu_ = R"(
786
+ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype):
787
+ %a_dequant = aten::dequantize(%a_quant)
788
+ %r = aten::elu_(%a_dequant, %alpha, %scale, %input_scale)
789
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
790
+ return (%r_quant) )";
791
+
792
+ // ============= General Ops that inherit quantization parameters from input
793
+ // tensor =============
794
+ auto avg_pool1d = getInputTensorQParamOpFusionInfo(
795
+ "aten::avg_pool1d",
796
+ {"%kernel_size",
797
+ "%stride",
798
+ "%padding",
799
+ "%ceil_mode",
800
+ "%count_include_pad"});
801
+
802
+ auto avg_pool2d = getInputTensorQParamOpFusionInfo(
803
+ "aten::avg_pool2d",
804
+ {"%kernel_size",
805
+ "%stride",
806
+ "%padding",
807
+ "%ceil_mode",
808
+ "%count_include_pad",
809
+ "%divisor_override"});
810
+
811
+ std::string common_general_value_op = R"(
812
+ %r_scale : float = aten::q_scale(%a_quant)
813
+ %r_zero_point : int = aten::q_zero_point(%a_quant)
814
+ %r_dtype : int = prim::dtype(%a_quant)
815
+ %r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
816
+ return (%r_quant) )";
817
+
818
+ auto avg_pool3d = getInputTensorQParamOpFusionInfo(
819
+ "aten::avg_pool3d",
820
+ {"%kernel_size",
821
+ "%stride",
822
+ "%padding",
823
+ "%ceil_mode",
824
+ "%count_include_pad",
825
+ "%divisor_override"});
826
+
827
+ auto adaptive_avg_pool1d = getInputTensorQParamOpFusionInfo(
828
+ "aten::adaptive_avg_pool1d", {"%output_size"});
829
+
830
+ auto adaptive_avg_pool2d = getInputTensorQParamOpFusionInfo(
831
+ "aten::adaptive_avg_pool2d", {"%output_size"});
832
+
833
+ auto adaptive_avg_pool3d = getInputTensorQParamOpFusionInfo(
834
+ "aten::adaptive_avg_pool3d", {"%output_size"});
835
+
836
+ auto mean1 = getInputTensorQParamOpFusionInfo("aten::mean", {"%dim"});
837
+
838
+ auto mean2 = getInputTensorQParamOpFusionInfo(
839
+ "aten::mean", {"%dim", "%keepdim", "%out"});
840
+
841
+ auto upsample_nearest1d_vec = getInputTensorQParamOpFusionInfo(
842
+ "aten::upsample_nearest1d", {"%output_size", "%scale_factors"});
843
+
844
+ auto upsample_nearest2d_vec = getInputTensorQParamOpFusionInfo(
845
+ "aten::upsample_nearest2d", {"%output_size", "%scale_factors"});
846
+
847
+ auto upsample_nearest3d_vec = getInputTensorQParamOpFusionInfo(
848
+ "aten::upsample_nearest3d", {"%output_size", "%scale_factors"});
849
+
850
+ auto upsample_linear1d_vec = getInputTensorQParamOpFusionInfo(
851
+ "aten::upsample_linear1d",
852
+ {"%output_size", "%align_corners", "%scale_factors"});
853
+
854
+ auto upsample_bilinear2d_vec = getInputTensorQParamOpFusionInfo(
855
+ "aten::upsample_bilinear2d",
856
+ {"%output_size", "%align_corners", "%scale_factors"});
857
+
858
+ auto upsample_trilinear3d_vec = getInputTensorQParamOpFusionInfo(
859
+ "aten::upsample_trilinear3d",
860
+ {"%output_size", "%align_corners", "%scale_factors"});
861
+
862
+ auto upsample_nearest1d = getInputTensorQParamOpFusionInfo(
863
+ "aten::upsample_nearest1d", {"%output_size", "%scales"});
864
+
865
+ auto upsample_nearest2d = getInputTensorQParamOpFusionInfo(
866
+ "aten::upsample_nearest2d", {"%output_size", "%scale_h", "%scale_w"});
867
+
868
+ auto upsample_nearest3d = getInputTensorQParamOpFusionInfo(
869
+ "aten::upsample_nearest3d",
870
+ {"%output_size", "%scale_d", "%scale_h", "%scale_w"});
871
+
872
+ auto upsample_linear1d = getInputTensorQParamOpFusionInfo(
873
+ "aten::upsample_linear1d", {"%output_size", "%align_corners", "%scales"});
874
+
875
+ auto upsample_bilinear2d = getInputTensorQParamOpFusionInfo(
876
+ "aten::upsample_bilinear2d",
877
+ {"%output_size", "%align_corners", "%scale_h", "%scale_w"});
878
+
879
+ auto upsample_trilinear3d = getInputTensorQParamOpFusionInfo(
880
+ "aten::upsample_trilinear3d",
881
+ {"%output_size", "%align_corners", "%scale_d", "%scale_h", "%scale_w"});
882
+
883
+ auto clamp = getClampOpFusionInfo("aten::clamp", {"%min", "%max"});
884
+
885
+ auto hardtanh = getClampOpFusionInfo("aten::hardtanh", {"%min", "%max"});
886
+
887
+ auto hardtanh_ = getClampOpFusionInfo("aten::hardtanh_", {"%min", "%max"});
888
+
889
+ auto leaky_relu =
890
+ getInputTensorQParamOpFusionInfo("aten::leaky_relu", {"%negative_slope"});
891
+
892
+ auto leaky_relu_ = getInputTensorQParamOpFusionInfo(
893
+ "aten::leaky_relu_", {"%negative_slope"});
894
+
895
+ // Ops with fixed quantization parameters
896
+ auto hardsigmoid = getFixedQParamOpFusionInfo("aten::hardsigmoid", {}, false);
897
+
898
+ auto hardsigmoid_ =
899
+ getFixedQParamOpFusionInfo("aten::hardsigmoid_", {}, false);
900
+
901
+ auto sigmoid = getFixedQParamOpFusionInfo("aten::sigmoid", {}, false);
902
+
903
+ auto sigmoid_ = getFixedQParamOpFusionInfo("aten::sigmoid_", {}, false);
904
+
905
+ auto tanh = getFixedQParamOpFusionInfo("aten::tanh", {}, true);
906
+
907
+ auto tanh_ = getFixedQParamOpFusionInfo("aten::tanh_", {}, true);
908
+
909
+ auto hardswish = getObservedQParamOpFusionInfo(
910
+ "aten::hardswish", "quantized::hardswish", {}, {});
911
+
912
+ auto hardswish_ = getObservedQParamOpFusionInfo(
913
+ "aten::hardswish_", "quantized::hardswish", {}, {});
914
+
915
+ auto layer_norm = getObservedQParamOpFusionInfo(
916
+ "aten::layer_norm",
917
+ "quantized::layer_norm",
918
+ {"%normalized_shape", "%weight", "%bias", "%eps", "%cudnn_enabled"},
919
+ {"%normalized_shape", "%weight", "%bias", "%eps"});
920
+
921
+ auto group_norm = getObservedQParamOpFusionInfo(
922
+ "aten::group_norm",
923
+ "quantized::group_norm",
924
+ {"%num_groups", "%weight", "%bias", "%eps", "%cudnn_enabled"},
925
+ {"%num_groups", "%weight", "%bias", "%eps"});
926
+
927
+ auto instance_norm = getObservedQParamOpFusionInfo(
928
+ "aten::instance_norm",
929
+ "quantized::instance_norm",
930
+ {"%weight",
931
+ "%bias",
932
+ "%running_mean",
933
+ "%running_var",
934
+ "%use_input_stats",
935
+ "%momentum",
936
+ "%eps",
937
+ "%cudnn_enabled"},
938
+ {"%weight", "%bias", "%eps"});
939
+
940
+ return {
941
+ {"quantized::conv1d", std::move(conv1d), std::move(quantized_conv1d)},
942
+ {"quantized::conv1d_relu", std::move(conv1d_relu), quantized_conv1d_relu},
943
+ {"quantized::conv1d_relu",
944
+ std::move(conv1d_inplace_relu),
945
+ std::move(quantized_conv1d_relu)},
946
+ {"quantized::conv2d", std::move(conv2d), std::move(quantized_conv2d)},
947
+ {"quantized::conv2d_relu", std::move(conv2d_relu), quantized_conv2d_relu},
948
+ {"quantized::conv2d_relu",
949
+ std::move(conv2d_inplace_relu),
950
+ std::move(quantized_conv2d_relu)},
951
+ {"quantized::conv3d", std::move(conv3d), std::move(quantized_conv3d)},
952
+ {"quantized::conv3d_relu", std::move(conv3d_relu), quantized_conv3d_relu},
953
+ {"quantized::conv3d_relu",
954
+ std::move(conv3d_inplace_relu),
955
+ std::move(quantized_conv3d_relu)},
956
+ {"quantized::conv_transpose1d",
957
+ std::move(conv_transpose1d),
958
+ std::move(quantized_conv_transpose1d)},
959
+ {"quantized::conv_transpose2d",
960
+ std::move(conv_transpose2d),
961
+ std::move(quantized_conv_transpose2d)},
962
+ {"quantized::linear", std::move(linear), std::move(quantized_linear)},
963
+ {"quantized::linear_relu", std::move(linear_relu), quantized_linear_relu},
964
+ {"quantized::linear_relu",
965
+ std::move(linear_inplace_relu),
966
+ std::move(quantized_linear_relu)},
967
+ {"quantized::add_relu",
968
+ std::move(add_relu),
969
+ quantized_add_relu,
970
+ {aten_add_alpha_is_one}},
971
+ {"quantized::add_relu",
972
+ std::move(add_inplace_relu),
973
+ quantized_add_relu,
974
+ {aten_add_alpha_is_one}},
975
+ {"quantized::add_relu",
976
+ std::move(inplace_add_relu),
977
+ quantized_add_relu,
978
+ {aten_add_alpha_is_one}},
979
+ {"quantized::add_relu",
980
+ std::move(inplace_add_inplace_relu),
981
+ std::move(quantized_add_relu),
982
+ {aten_add_alpha_is_one}},
983
+ std::move(add_scalar),
984
+ std::move(add_scalar_out),
985
+ // note that these must come after quantized::add_scalar and
986
+ // quantized::add_scalar_out patterns
987
+ {"quantized::add_scalar_relu",
988
+ quantized_add_scalar_relu_pattern,
989
+ quantized_add_scalar_relu_replacement},
990
+ {"quantized::add_scalar_relu",
991
+ quantized_add_scalar_inplace_relu_pattern,
992
+ quantized_add_scalar_relu_replacement},
993
+ {"quantized::add_scalar_relu_out",
994
+ quantized_add_scalar_relu_out_pattern,
995
+ quantized_add_scalar_relu_out_replacement},
996
+ {"quantized::add_scalar_relu_out",
997
+ quantized_add_scalar_inplace_relu_out_pattern,
998
+ quantized_add_scalar_relu_out_replacement},
999
+ {"quantized::add",
1000
+ std::move(add),
1001
+ quantized_add,
1002
+ {aten_add_alpha_is_one}},
1003
+ {"quantized::add",
1004
+ std::move(inplace_add),
1005
+ std::move(quantized_add),
1006
+ {aten_add_alpha_is_one}},
1007
+ {"quantized::cat", std::move(cat), std::move(quantized_cat)},
1008
+ {"quantized::batch_norm",
1009
+ std::move(batch_norm),
1010
+ std::move(quantized_batch_norm)},
1011
+ {"quantized::batch_norm_relu",
1012
+ std::move(batch_norm_relu),
1013
+ quantized_batch_norm_relu},
1014
+ {"quantized::batch_norm_relu",
1015
+ std::move(batch_norm_inplace_relu),
1016
+ std::move(quantized_batch_norm_relu)},
1017
+ std::move(mul_scalar),
1018
+ std::move(mul_scalar_out),
1019
+ // note that these must come after quantized::mul_scalar and
1020
+ // quantized::mul_scalar_out patterns
1021
+ {"quantized::mul_scalar_relu",
1022
+ quantized_mul_scalar_relu_pattern,
1023
+ quantized_mul_scalar_relu_replacement},
1024
+ {"quantized::mul_scalar_relu",
1025
+ quantized_mul_scalar_inplace_relu_pattern,
1026
+ quantized_mul_scalar_relu_replacement},
1027
+ {"quantized::mul_scalar_relu_out",
1028
+ quantized_mul_scalar_relu_out_pattern,
1029
+ quantized_mul_scalar_relu_out_replacement},
1030
+ {"quantized::mul_scalar_relu_out",
1031
+ quantized_mul_scalar_inplace_relu_out_pattern,
1032
+ quantized_mul_scalar_relu_out_replacement},
1033
+ {"quantized::mul_relu", std::move(mul_relu), quantized_mul_relu},
1034
+ {"quantized::mul_relu", std::move(mul_inplace_relu), quantized_mul_relu},
1035
+ {"quantized::mul_relu", std::move(inplace_mul_relu), quantized_mul_relu},
1036
+ {"quantized::mul_relu",
1037
+ std::move(inplace_mul_inplace_relu),
1038
+ std::move(quantized_mul_relu)},
1039
+ {"quantized::mul", std::move(mul), quantized_mul},
1040
+ {"quantized::mul", std::move(inplace_mul), std::move(quantized_mul)},
1041
+ std::move(hardswish),
1042
+ std::move(hardswish_),
1043
+ std::move(layer_norm),
1044
+ std::move(group_norm),
1045
+ std::move(instance_norm),
1046
+ {"quantized::elu", std::move(elu), quantized_elu},
1047
+ {"quantized::elu_", std::move(elu_), std::move(quantized_elu)},
1048
+ std::move(avg_pool1d),
1049
+ std::move(avg_pool2d),
1050
+ std::move(avg_pool3d),
1051
+ std::move(adaptive_avg_pool1d),
1052
+ std::move(adaptive_avg_pool2d),
1053
+ std::move(adaptive_avg_pool3d),
1054
+ std::move(mean1),
1055
+ std::move(mean2),
1056
+ std::move(upsample_nearest1d),
1057
+ std::move(upsample_nearest2d),
1058
+ std::move(upsample_nearest3d),
1059
+ std::move(upsample_linear1d),
1060
+ std::move(upsample_bilinear2d),
1061
+ std::move(upsample_trilinear3d),
1062
+ std::move(upsample_nearest1d_vec),
1063
+ std::move(upsample_nearest2d_vec),
1064
+ std::move(upsample_nearest3d_vec),
1065
+ std::move(upsample_linear1d_vec),
1066
+ std::move(upsample_bilinear2d_vec),
1067
+ std::move(upsample_trilinear3d_vec),
1068
+ std::move(clamp),
1069
+ std::move(hardtanh),
1070
+ std::move(hardtanh_),
1071
+ std::move(leaky_relu),
1072
+ std::move(leaky_relu_),
1073
+ // fixed qparam ops
1074
+ std::move(hardsigmoid),
1075
+ std::move(hardsigmoid_),
1076
+ std::move(sigmoid),
1077
+ std::move(sigmoid_),
1078
+ std::move(tanh),
1079
+ std::move(tanh_),
1080
+ };
1081
+ }
1082
+
1083
+ inline std::vector<QuantFusionInfo>
1084
+ dynamic_quantized_linear_pattern_and_replacements() {
1085
+ std::string linear_dynamic = R"(
1086
+ graph(%packed_params, %a):
1087
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
1088
+ %w_dequant = aten::dequantize(%w_quant)
1089
+ %r = aten::linear(%a, %w_dequant, %b)
1090
+ return (%r) )";
1091
+
1092
+ // This pattern ignores reduce range
1093
+ // Set the reduce range to default to true, since qnnpack backend ignores this
1094
+ // argument.
1095
+ std::string quantized_linear_dynamic = R"(
1096
+ graph(%packed_params, %a):
1097
+ %reduce_range : bool = prim::Constant[value=1]()
1098
+ %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range)
1099
+ return (%r) )";
1100
+
1101
+ return {
1102
+ {"quantized::linear_dynamic",
1103
+ std::move(linear_dynamic),
1104
+ std::move(quantized_linear_dynamic)},
1105
+ };
1106
+ }
1107
+
1108
+ static std::vector<QuantFusionInfo>
1109
+ dynamic_quant_fusion_pattern_and_replacements() {
1110
+ std::string linear_dynamic = R"(
1111
+ graph(%packed_params, %a, %reduce_range, %a_dtype):
1112
+ %a_scale : float, %a_zero_point : int = aten::_choose_qparams_per_tensor(%a, %reduce_range)
1113
+ %a_quant = aten::quantize_per_tensor(%a, %a_scale, %a_zero_point, %a_dtype)
1114
+ %a_dequant = aten::dequantize(%a_quant)
1115
+ %w_quant : Tensor, %b : Tensor? = quantized::linear_unpack(%packed_params)
1116
+ %w_dequant = aten::dequantize(%w_quant)
1117
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
1118
+ return (%r) )";
1119
+
1120
+ std::string quantized_linear_dynamic = R"(
1121
+ graph(%packed_params, %a, %reduce_range, %a_dtype):
1122
+ %r = quantized::linear_dynamic(%a, %packed_params, %reduce_range)
1123
+ return (%r) )";
1124
+
1125
+ std::string linear_dynamic_fp16 = R"(
1126
+ graph(%packed_params, %a):
1127
+ %w_unpacked : Tensor, %b : Tensor? = quantized::linear_unpack_fp16(%packed_params)
1128
+ %r = aten::linear(%a, %w_unpacked, %b)
1129
+ return (%r) )";
1130
+
1131
+ std::string quantized_linear_dynamic_fp16 = R"(
1132
+ graph(%packed_params, %a):
1133
+ %r = quantized::linear_dynamic_fp16(%a, %packed_params)
1134
+ return (%r) )";
1135
+
1136
+ return {
1137
+ {"quantized::linear_dynamic",
1138
+ std::move(linear_dynamic),
1139
+ std::move(quantized_linear_dynamic)},
1140
+ {"quantized::linear_dynamic_fp16",
1141
+ std::move(linear_dynamic_fp16),
1142
+ std::move(quantized_linear_dynamic_fp16)},
1143
+ };
1144
+ }
1145
+
1146
+ static std::vector<QuantFusionInfo> linear_prepack_unpack_patterns() {
1147
+ std::string linear_with_quant = R"(
1148
+ graph(%a_dequant, %w_quant, %b):
1149
+ %w_dequant = aten::dequantize(%w_quant)
1150
+ %r = aten::linear(%a_dequant, %w_dequant, %b)
1151
+ return (%r) )";
1152
+
1153
+ std::string linear_with_quant_prepack = R"(
1154
+ graph(%a_dequant, %w_quant, %b):
1155
+ %packed_params = quantized::linear_prepack(%w_quant, %b)
1156
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack(%packed_params)
1157
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1158
+ %r = aten::linear(%a_dequant, %w_dequant, %b_unpacked)
1159
+ return (%r) )";
1160
+ std::string linear_fp16_with_cast = R"(
1161
+ graph(%w, %a_dq, %b):
1162
+ %fp16_tensor = aten::_saturate_weight_to_fp16(%w)
1163
+ %r = aten::linear(%a_dq, %fp16_tensor, %b)
1164
+ return (%r) )";
1165
+ std::string linear_fp16_with_prepack = R"(
1166
+ graph(%w, %a_dq, %b):
1167
+ %packed_params = quantized::linear_prepack_fp16(%w, %b)
1168
+ %w_unpacked : Tensor, %b_unpacked : Tensor? = quantized::linear_unpack_fp16(%packed_params)
1169
+ %r = aten::linear(%a_dq, %w_unpacked, %b_unpacked)
1170
+ return (%r) )";
1171
+
1172
+ return {
1173
+ {"linear_prepack_unpack",
1174
+ std::move(linear_with_quant),
1175
+ std::move(linear_with_quant_prepack)},
1176
+ {"linear_fp16_prepack_unpack",
1177
+ std::move(linear_fp16_with_cast),
1178
+ std::move(linear_fp16_with_prepack)},
1179
+ };
1180
+ }
1181
+
1182
+ static std::vector<QuantFusionInfo> conv_prepack_unpack_patterns() {
1183
+ std::string conv1d_with_quant = R"(
1184
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1185
+ %w_dequant = aten::dequantize(%w_quant)
1186
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
1187
+ return (%r) )";
1188
+
1189
+ std::string conv1d_with_quant_prepack = R"(
1190
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1191
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv1d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
1192
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv1d_unpack(%packed_params)
1193
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1194
+ %r = aten::conv1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
1195
+ return (%r) )";
1196
+
1197
+ std::string conv2d_with_quant = R"(
1198
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1199
+ %w_dequant = aten::dequantize(%w_quant)
1200
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
1201
+ return (%r) )";
1202
+
1203
+ std::string conv2d_with_quant_prepack = R"(
1204
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1205
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv2d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
1206
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv2d_unpack(%packed_params)
1207
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1208
+ %r = aten::conv2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
1209
+ return (%r) )";
1210
+
1211
+ std::string conv3d_with_quant = R"(
1212
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1213
+ %w_dequant = aten::dequantize(%w_quant)
1214
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b, %stride, %padding, %dilation, %groups)
1215
+ return (%r) )";
1216
+
1217
+ std::string conv3d_with_quant_prepack = R"(
1218
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %dilation, %groups):
1219
+ %packed_params : __torch__.torch.classes.quantized.Conv3dPackedParamsBase = quantized::conv3d_prepack(%w_quant, %b, %stride, %padding, %dilation, %groups)
1220
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv3d_unpack(%packed_params)
1221
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1222
+ %r = aten::conv3d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %dilation, %groups)
1223
+ return (%r) )";
1224
+
1225
+ std::string conv_transpose1d_with_quant = R"(
1226
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
1227
+ %w_dequant = aten::dequantize(%w_quant)
1228
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
1229
+ return (%r) )";
1230
+
1231
+ std::string conv_transpose1d_with_quant_prepack = R"(
1232
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
1233
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose1d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups)
1234
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose1d_unpack(%packed_params)
1235
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1236
+ %r = aten::conv_transpose1d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation)
1237
+ return (%r) )";
1238
+
1239
+ std::string conv_transpose2d_with_quant = R"(
1240
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
1241
+ %w_dequant = aten::dequantize(%w_quant)
1242
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b, %stride, %padding, %output_padding, %groups, %dilation)
1243
+ return (%r) )";
1244
+
1245
+ std::string conv_transpose2d_with_quant_prepack = R"(
1246
+ graph(%a_dequant, %w_quant, %b, %stride, %padding, %output_padding, %groups, %dilation):
1247
+ %packed_params : __torch__.torch.classes.quantized.Conv2dPackedParamsBase = quantized::conv_transpose2d_prepack(%w_quant, %b, %stride, %padding, %output_padding, %dilation, %groups)
1248
+ %w_quant_unpacked : Tensor, %b_unpacked : Tensor? = quantized::conv_transpose2d_unpack(%packed_params)
1249
+ %w_dequant = aten::dequantize(%w_quant_unpacked)
1250
+ %r = aten::conv_transpose2d(%a_dequant, %w_dequant, %b_unpacked, %stride, %padding, %output_padding, %groups, %dilation)
1251
+ return (%r) )";
1252
+
1253
+ return {
1254
+ {"conv1d_prepack_unpack",
1255
+ std::move(conv1d_with_quant),
1256
+ std::move(conv1d_with_quant_prepack)},
1257
+ {"conv2d_prepack_unpack",
1258
+ std::move(conv2d_with_quant),
1259
+ std::move(conv2d_with_quant_prepack)},
1260
+ {"conv3d_prepack_unpack",
1261
+ std::move(conv3d_with_quant),
1262
+ std::move(conv3d_with_quant_prepack)},
1263
+ {"conv_transpose1d_prepack_unpack",
1264
+ std::move(conv_transpose1d_with_quant),
1265
+ std::move(conv_transpose1d_with_quant_prepack)},
1266
+ {"conv_transpose2d_prepack_unpack",
1267
+ std::move(conv_transpose2d_with_quant),
1268
+ std::move(conv_transpose2d_with_quant_prepack)}};
1269
+ }
1270
+
1271
+ } // namespace jit
1272
+ } // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/quantization_type.h ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once
#include <cstdint>
#include <ostream>

namespace torch {
namespace jit {

// Quantization type (dynamic quantization, static quantization).
// Should match the Python enum in quantize_jit.py
enum QuantType : std::uint8_t { DYNAMIC = 0, STATIC };

// Stream-insertion operator so a QuantType can be printed in logs and
// error messages (definition lives in the corresponding .cpp).
std::ostream& operator<<(std::ostream& os, QuantType t);

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/quantization/register_packed_params.h ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <memory>

namespace torch {
namespace jit {

// Predicate over graph nodes: returns true for nodes that produce
// packed (prepacked) parameters.
using PrePackParamFilterFn = std::function<bool(Node*)>;

// Registers prepacked parameters found in `method_name` of module `m` as
// module attributes whose names start with `attr_prefix`; `is_packed_param`
// selects which nodes count as packed params. Returns the set of
// registered attribute names.
// NOTE(review): exact registration semantics live in the .cpp — confirm
// against the implementation before relying on details beyond the signature.
TORCH_API std::unordered_set<std::string> RegisterPrePackParams(
    Module& m,
    const std::string& method_name,
    const PrePackParamFilterFn& is_packed_param,
    const std::string& attr_prefix);

// Joins path components into a single string (separator defined in the .cpp).
TORCH_API std::string joinPaths(const std::vector<std::string>& paths);
} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>

namespace torch {
namespace jit {

// propagate autograd zero information through a gradient graph and
// remove grad_of blocks if present.
// Note: this is a very limited pass. It only propagates autograd zeros for
// operations generated by the symbolic autodiff code and cleans up
// AutogradAdds when possible. Outputs of other nodes are conservatively
// marked Unknown and not optimized.
TORCH_API void specializeAutogradZero(std::shared_ptr<Graph> g);

// Forward declaration; defined by the profiling executor.
struct ProfilingRecord;

// Inserts profiling nodes that record the information the
// specializeAutogradZero pass later consumes.
TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr);

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/symbolic_shape_analysis.h>

namespace torch {
namespace jit {

// A canonical (renumbered) form of a c10::SymbolicShape, suitable for use
// as a cache key: symbolic dimension ids are remapped through `ss_map` so
// that equivalent shapes compare equal regardless of original ids.
struct TORCH_API CanonicalizedSymbolicShape {
  // TODO: Consider in the future if it is reasonable to
  // merge code with SymbolicShape or VaryingShape while keeping
  // the two not implicitly convertable (and cause bugs).
  CanonicalizedSymbolicShape(
      const c10::SymbolicShape& orig_shape,
      std::unordered_map<int64_t, int64_t>& ss_map) {
    init(orig_shape, ss_map);
  }

  // Convenience constructor that canonicalizes with a fresh, empty mapping.
  CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) {
    std::unordered_map<int64_t, int64_t> new_ssmap;
    init(orig_shape, new_ssmap);
  }

  // Hash consistent with operator== below (defined in the .cpp).
  size_t hash() const;

  // Reconstructs a c10::SymbolicShape, mapping canonical ids back through
  // `inverse_ss_map`.
  c10::SymbolicShape toSymbolicShape(
      std::unordered_map<int64_t, int64_t>& inverse_ss_map) const;

  TORCH_API friend bool operator==(
      const CanonicalizedSymbolicShape& a,
      const CanonicalizedSymbolicShape& b);

 private:
  // Canonicalized dimension values; c10::nullopt presumably encodes an
  // unranked shape — confirm against init() in the .cpp.
  c10::optional<std::vector<int64_t>> values_;

  void init(
      const c10::SymbolicShape& orig_shape,
      std::unordered_map<int64_t, int64_t>& ss_map);
};

// SHAPE CACHE API
// Looks up a previously cached shape-function result for (schema, args);
// returns c10::nullopt on a cache miss.
TORCH_API c10::optional<std::vector<at::SymbolicShape>>
get_cached_shape_function(
    const FunctionSchema* schema,
    const std::vector<SSAInput>& arg_vec);

// Stores a shape-function result so later calls with an equivalent
// (schema, args) pair can skip recomputation.
TORCH_API void cache_shape_function(
    const FunctionSchema* schema,
    const std::vector<SSAInput>& arg_vec,
    const std::vector<at::SymbolicShape>& ret_vec);

// For use in test code
TORCH_API void clear_shape_cache();
TORCH_API size_t get_shape_cache_size();

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/value_refinement_utils.h ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir_views.h>
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/peephole.h>
#include <torch/csrc/jit/passes/peephole_list_idioms.h>
#include <torch/csrc/jit/runtime/graph_executor.h>

namespace torch {
namespace jit {

// Refine from Value of type List -> len of list
// If a refinement mapping of List Value * -> len is present in a block
// the list is guaranteed to be that length
// TODO: vector may be faster
using ListRefinement = std::unordered_map<Value*, int64_t>;

// Refinements that hold in BOTH inputs (set intersection of guarantees).
TORCH_API ListRefinement
intersectRefinements(const ListRefinement& ref1, const ListRefinement& ref2);

// Refinements that hold in EITHER input (set union of guarantees).
TORCH_API ListRefinement
unionRefinements(const ListRefinement& ref1, const ListRefinement& ref2);

// Represents the refinement information that can be carried on a boolean:
// one set of refinements that hold when the boolean is true, and one that
// holds when it is false.
struct BooleanRefinementMapping {
  BooleanRefinementMapping(
      ListRefinement true_refine,
      ListRefinement false_refine)
      : true_refine_(std::move(true_refine)),
        false_refine_(std::move(false_refine)){};
  BooleanRefinementMapping() = default; // empty

  // Mapping that carries information only on the false branch.
  static BooleanRefinementMapping FalseRefinements(
      ListRefinement false_refine) {
    return BooleanRefinementMapping({}, std::move(false_refine));
  }

  // Mapping that carries information only on the true branch.
  static BooleanRefinementMapping TrueRefinements(ListRefinement true_refine) {
    return BooleanRefinementMapping(std::move(true_refine), {});
  }

  // Branch-wise intersection: keeps only refinements guaranteed by both
  // mappings on the corresponding branch.
  BooleanRefinementMapping intersectBooleanRefinementMapping(
      BooleanRefinementMapping& other) {
    return BooleanRefinementMapping(
        intersectRefinements(true_refine_, other.true_refine()),
        intersectRefinements(false_refine_, other.false_refine()));
  }

  ListRefinement& true_refine() {
    return true_refine_;
  }

  ListRefinement& false_refine() {
    return false_refine_;
  }

 private:
  ListRefinement true_refine_;
  ListRefinement false_refine_;
};

// Merges the refinements of an If node's two branches into the enclosing
// block's refinement state (semantics in the .cpp; `throwing_blocks` lets a
// branch that always throws be ignored).
TORCH_API void joinIfRefinements(
    Node* if_node,
    std::unordered_set<Block*>& throwing_blocks,
    ListRefinement& curr_block_refinements,
    ListRefinement& true_block_refinements,
    ListRefinement& false_block_refinements,
    std::unordered_map<Value*, BooleanRefinementMapping>& info);

// handles adding blocks to throwing blocks and propagating refinements via
// boolean comparisons
// NOTE(review): "Refinent" in the name is a typo for "Refinement", but the
// identifier is public API and must not be renamed here.
TORCH_API bool handleCommonRefinentOperators(
    Node* n,
    std::unordered_set<Block*>& throwing_blocks,
    std::unordered_map<Value*, BooleanRefinementMapping>& info);

} // namespace jit
} // namespace torch
vllm/lib/python3.10/site-packages/cupy/_manipulation/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Functions from the following NumPy document
2
+ # https://numpy.org/doc/stable/reference/routines.array-manipulation.html
vllm/lib/python3.10/site-packages/cupy/_manipulation/join.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cupy
2
+ from cupy import _core
3
+
4
+
5
def column_stack(tup):
    """Stacks 1-D and 2-D arrays as columns into a 2-D array.

    A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays
    are concatenated along the second axis.

    Args:
        tup (sequence of arrays): 1-D or 2-D arrays to be stacked.

    Returns:
        cupy.ndarray: A new 2-D array of stacked columns.

    .. seealso:: :func:`numpy.column_stack`

    """
    # Unlike NumPy, only device arrays are accepted; reject everything else
    # up front with a clear error.
    if not all(isinstance(arr, cupy.ndarray) for arr in tup):
        raise TypeError('Only cupy arrays can be column stacked')

    columns = []
    for arr in tup:
        if arr.ndim == 1:
            # Promote a vector to a single-column 2-D view.
            columns.append(arr[:, cupy.newaxis])
        elif arr.ndim == 2:
            columns.append(arr)
        else:
            raise ValueError(
                'Only 1 or 2 dimensional arrays can be column stacked')

    return concatenate(columns, axis=1)
33
+
34
+
35
def concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):
    """Joins arrays along an axis.

    Args:
        tup (sequence of arrays): Arrays to be joined. All of these should have
            same dimensionalities except the specified axis.
        axis (int or None): The axis to join arrays along.
            If axis is None, arrays are flattened before use.
            Default is 0.
        out (cupy.ndarray): Output array.
        dtype (str or dtype): If provided, the destination array will have this
            dtype. Cannot be provided together with ``out``.
        casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional):
            Controls what kind of data casting may occur. Defaults to
            ``'same_kind'``.

    Returns:
        cupy.ndarray: Joined array.

    .. seealso:: :func:`numpy.concatenate`

    """
    if axis is None:
        # NumPy semantics: flatten every input and join along axis 0.
        axis = 0
        tup = [arr.ravel() for arr in tup]
    return _core.concatenate_method(tup, axis, out, dtype, casting)
61
+
62
+
63
def dstack(tup):
    """Stacks arrays along the third axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked. Each array is converted
            by :func:`cupy.atleast_3d` before stacking.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.dstack`

    """
    # Promote everything to 3-D first so the depth axis (2) always exists.
    promoted = [cupy.atleast_3d(arr) for arr in tup]
    return concatenate(promoted, 2)
77
+
78
+
79
def hstack(tup, *, dtype=None, casting='same_kind'):
    """Stacks arrays horizontally.

    If an input array has one dimension, then the array is treated as a
    horizontal vector and stacked along the first axis. Otherwise, the array is
    stacked along the second axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked.
        dtype (str or dtype): If provided, the destination array will have this
            dtype.
        casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional):
            Controls what kind of data casting may occur. Defaults to
            ``'same_kind'``.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.hstack`

    """
    promoted = [cupy.atleast_1d(arr) for arr in tup]
    # 1-D inputs are joined end-to-end (axis 0); otherwise join columns.
    # Only the first array is inspected, matching NumPy's behavior.
    join_axis = 0 if promoted[0].ndim == 1 else 1
    return concatenate(promoted, join_axis, dtype=dtype, casting=casting)
105
+
106
+
107
def vstack(tup, *, dtype=None, casting='same_kind'):
    """Stacks arrays vertically.

    If an input array has one dimension, then the array is treated as a
    horizontal vector and stacked along the additional axis at the head.
    Otherwise, the array is stacked along the first axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked. Each array is converted
            by :func:`cupy.atleast_2d` before stacking.
        dtype (str or dtype): If provided, the destination array will have this
            dtype.
        casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional):
            Controls what kind of data casting may occur. Defaults to
            ``'same_kind'``.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.vstack`

    """
    # Promote to at least 2-D so row vectors stack as rows, then join rows.
    return concatenate([cupy.atleast_2d(m) for m in tup], 0,
                       dtype=dtype, casting=casting)
131
+
132
+
133
def stack(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):
    """Stacks arrays along a new axis.

    Args:
        tup (sequence of arrays): Arrays to be stacked.
        axis (int): Axis along which the arrays are stacked.
        out (cupy.ndarray): Output array.
        dtype (str or dtype): If provided, the destination array will have this
            dtype. Cannot be provided together with ``out``.
        casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional):
            Controls what kind of data casting may occur. Defaults to
            ``'same_kind'``.

    Returns:
        cupy.ndarray: Stacked array.

    .. seealso:: :func:`numpy.stack`
    """
    # Give every input a fresh length-1 axis at `axis`, then concatenating
    # along that axis yields the stacked result.
    expanded = [cupy.expand_dims(arr, axis) for arr in tup]
    return concatenate(expanded, axis, out, dtype=dtype, casting=casting)
vllm/lib/python3.10/site-packages/cupy/_manipulation/kind.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cupy
2
+ from cupy import _core
3
+
4
+
5
def asarray_chkfinite(a, dtype=None, order=None):
    """Converts the given input to an array,
    and raises an error if the input contains NaNs or Infs.

    Args:
        a: array like.
        dtype: data type, optional
        order: {'C', 'F', 'A', 'K'}, optional

    Returns:
        cupy.ndarray: An array on the current device.

    .. note::
        This function performs device synchronization.

    .. seealso:: :func:`numpy.asarray_chkfinite`

    """
    arr = cupy.asarray(a, dtype=dtype, order=order)
    # The .all() reduction is transferred to the host here, which is why
    # this function synchronizes with the device.
    if not cupy.isfinite(arr).all():
        raise ValueError("array must not contain Infs or NaNs")
    return arr
28
+
29
+
30
def asfarray(a, dtype=cupy.float64):
    """Converts array elements to float type.

    Args:
        a (cupy.ndarray): Source array.
        dtype: str or dtype object, optional

    Returns:
        cupy.ndarray: The input array ``a`` as a float ndarray.

    .. seealso:: :func:`numpy.asfarray`

    """
    # A non-inexact (i.e. integer/bool) dtype request falls back to float64,
    # mirroring numpy.asfarray.
    target = dtype if cupy.issubdtype(dtype, cupy.inexact) else cupy.float64
    return cupy.asarray(a, dtype=target)
46
+
47
+
48
def asfortranarray(a, dtype=None):
    """Return an array laid out in Fortran order in memory.

    Args:
        a (~cupy.ndarray): The input array.
        dtype (str or dtype object, optional): By default, the data-type is
            inferred from the input data.

    Returns:
        ~cupy.ndarray: The input `a` in Fortran, or column-major, order.

    .. seealso:: :func:`numpy.asfortranarray`

    """
    # Thin wrapper: the layout conversion is implemented in the C++ core.
    return _core.asfortranarray(a, dtype)
63
+
64
+
65
def require(a, dtype=None, requirements=None):
    """Return an array which satisfies the requirements.

    Args:
        a (~cupy.ndarray): The input array.
        dtype (str or dtype object, optional): The required data-type.
            If None preserve the current dtype.
        requirements (str or list of str): The requirements can be any
            of the following

            * 'F_CONTIGUOUS' ('F', 'FORTRAN') - ensure a Fortran-contiguous \
              array. \

            * 'C_CONTIGUOUS' ('C', 'CONTIGUOUS') - ensure a C-contiguous array.

            * 'OWNDATA' ('O') - ensure an array that owns its own data.

    Returns:
        ~cupy.ndarray: The input array ``a`` with specified requirements and
        type if provided.

    .. seealso:: :func:`numpy.require`

    """
    # Aliases accepted for each canonical flag.
    possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
                      'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
                      'O': 'OWNDATA', 'OWNDATA': 'OWNDATA'}

    if not requirements:
        # No flags requested: only the dtype conversion matters.
        try:
            return cupy.asanyarray(a, dtype=dtype)
        except TypeError:
            raise ValueError("Incorrect dtype \"{}\" provided".format(dtype))

    try:
        requirements = {possible_flags[x.upper()] for x in requirements}
    except KeyError:
        # Report one of the unrecognized flags in the error message.
        raise ValueError("Incorrect flag \"{}\" in requirements".format(
                         (set(requirements) -
                          set(possible_flags.keys())).pop()))

    if requirements >= {'C', 'F'}:
        raise ValueError('Cannot specify both "C" and "F" order')

    # NOTE(review): the long-form order strings are forwarded to cupy.array
    # as-is; presumably the core normalizes them by first letter — confirm.
    if 'F' in requirements:
        order = 'F_CONTIGUOUS'
        requirements.remove('F')
    elif 'C' in requirements:
        order = 'C_CONTIGUOUS'
        requirements.remove('C')
    else:
        order = 'A'

    # OWNDATA is satisfied by forcing a copy.
    copy = 'OWNDATA' in requirements
    try:
        arr = cupy.array(a, dtype=dtype, order=order, copy=copy, subok=False)
    except TypeError:
        raise ValueError("Incorrect dtype \"{}\" provided".format(dtype))
    return arr
vllm/lib/python3.10/site-packages/cupy/_manipulation/split.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy
2
+
3
+ from cupy import _core
4
+
5
+
6
def array_split(ary, indices_or_sections, axis=0):
    """Splits an array into multiple sub arrays along a given axis.

    This function is almost equivalent to :func:`cupy.split`. The only
    difference is that this function allows an integer sections that does not
    evenly divide the axis.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`

    """
    # Thin wrapper: the actual splitting lives in the C++ core.
    return _core.array_split(ary, indices_or_sections, axis)
17
+
18
+
19
def dsplit(ary, indices_or_sections):
    """Splits an array into multiple sub arrays along the third axis.

    This is equivalent to ``split`` with ``axis=2``.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`

    """
    # Depth splitting needs an existing third axis.
    if ary.ndim < 3:
        raise ValueError('Cannot dsplit an array with less than 3 dimensions')
    return split(ary, indices_or_sections, 2)
30
+
31
+
32
def hsplit(ary, indices_or_sections):
    """Splits an array into multiple sub arrays horizontally.

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`

    """
    if ary.ndim == 0:
        raise ValueError('Cannot hsplit a zero-dimensional array')
    # Vectors split along their only axis; higher ranks split along columns.
    split_axis = 0 if ary.ndim == 1 else 1
    return split(ary, indices_or_sections, split_axis)
47
+
48
+
49
def split(ary, indices_or_sections, axis=0):
    """Splits an array into multiple sub arrays along a given axis.

    Args:
        ary (cupy.ndarray): Array to split.
        indices_or_sections (int or sequence of ints): A value indicating how
            to divide the axis. If it is an integer, then is treated as the
            number of sections, and the axis is evenly divided. Otherwise,
            the integers indicate indices to split at. Note that the sequence
            on the device memory is not allowed.
        axis (int): Axis along which the array is split.

    Returns:
        A list of sub arrays. Each array is a view of the corresponding input
        array.

    .. seealso:: :func:`numpy.split`

    """
    if axis >= ary.ndim:
        raise IndexError('Axis exceeds ndim')

    # An integer section count must divide the axis exactly; otherwise the
    # caller should use array_split, which tolerates uneven division.
    if numpy.isscalar(indices_or_sections):
        if ary.shape[axis] % indices_or_sections != 0:
            raise ValueError(
                'indices_or_sections must divide the size along the axes.\n'
                'If you want to split the array into non-equally-sized '
                'arrays, use array_split instead.')

    return array_split(ary, indices_or_sections, axis)
79
+
80
+
81
def vsplit(ary, indices_or_sections):
    """Splits an array into multiple sub arrays along the first axis.

    This is equivalent to ``split`` with ``axis=0``.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.vsplit`

    """
    # Vertical splitting needs at least rows and columns.
    if ary.ndim <= 1:
        raise ValueError('Cannot vsplit an array with less than 2 dimensions')
    return split(ary, indices_or_sections, 0)