ZTWHHH committed on
Commit
d5ba5c2
·
verified ·
1 Parent(s): dac6ef6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_tf437/lib/python3.10/lib-dynload/_random.cpython-310-x86_64-linux-gnu.so +0 -0
  3. evalkit_tf437/lib/python3.10/lib-dynload/_sha1.cpython-310-x86_64-linux-gnu.so +0 -0
  4. evalkit_tf437/lib/python3.10/lib-dynload/_uuid.cpython-310-x86_64-linux-gnu.so +0 -0
  5. evalkit_tf437/lib/python3.10/lib-dynload/_xxtestfuzz.cpython-310-x86_64-linux-gnu.so +0 -0
  6. evalkit_tf437/lib/python3.10/lib-dynload/ossaudiodev.cpython-310-x86_64-linux-gnu.so +0 -0
  7. evalkit_tf437/lib/python3.10/lib-dynload/termios.cpython-310-x86_64-linux-gnu.so +0 -0
  8. evalkit_tf437/lib/python3.10/unittest/__pycache__/__init__.cpython-310.pyc +0 -0
  9. evalkit_tf437/lib/python3.10/unittest/__pycache__/loader.cpython-310.pyc +0 -0
  10. evalkit_tf437/lib/python3.10/unittest/__pycache__/runner.cpython-310.pyc +0 -0
  11. evalkit_tf437/lib/python3.10/unittest/suite.py +379 -0
  12. evalkit_tf437/lib/python3.10/xmlrpc/__init__.py +1 -0
  13. evalkit_tf437/lib/python3.10/xmlrpc/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_tf437/lib/python3.10/xmlrpc/__pycache__/client.cpython-310.pyc +0 -0
  15. evalkit_tf446/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc +0 -0
  16. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py +0 -0
  17. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc +0 -0
  18. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc +0 -0
  19. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py +147 -0
  20. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py +0 -0
  21. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc +0 -0
  22. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc +0 -0
  23. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc +0 -0
  24. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py +199 -0
  25. evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py +2229 -0
  26. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cpu/__init__.py +19 -0
  27. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
  28. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cuda/__init__.py +422 -0
  29. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
  30. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py +207 -0
  31. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc +0 -0
  32. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc +0 -0
  33. evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py +63 -0
  34. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mha/__pycache__/__init__.cpython-310.pyc +0 -0
  35. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkl/__init__.py +57 -0
  36. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc +0 -0
  37. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py +98 -0
  38. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc +0 -0
  39. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mps/__init__.py +55 -0
  40. evalkit_tf446/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc +0 -0
  41. evalkit_tf446/lib/python3.10/site-packages/torch/backends/nnpack/__init__.py +31 -0
  42. evalkit_tf446/lib/python3.10/site-packages/torch/backends/nnpack/__pycache__/__init__.cpython-310.pyc +0 -0
  43. evalkit_tf446/lib/python3.10/site-packages/torch/backends/openmp/__init__.py +7 -0
  44. evalkit_tf446/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc +0 -0
  45. evalkit_tf446/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py +111 -0
  46. evalkit_tf446/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc +0 -0
  47. evalkit_tf446/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  48. evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__init__.py +0 -0
  49. evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc +0 -0
  50. evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1345,3 +1345,4 @@ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libssl-28bef1ac.so
1345
  evalkit_tf437/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1346
  evalkit_tf446/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1347
  evalkit_tf446/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1345
  evalkit_tf437/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1346
  evalkit_tf446/lib/python3.10/site-packages/gradio_client/__pycache__/media_data.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1347
  evalkit_tf446/lib/python3.10/site-packages/torch/_decomp/__pycache__/decompositions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1348
+ evalkit_tf446/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/lib-dynload/_random.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (67.3 kB). View file
 
evalkit_tf437/lib/python3.10/lib-dynload/_sha1.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (60.4 kB). View file
 
evalkit_tf437/lib/python3.10/lib-dynload/_uuid.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (26.6 kB). View file
 
evalkit_tf437/lib/python3.10/lib-dynload/_xxtestfuzz.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (61.3 kB). View file
 
evalkit_tf437/lib/python3.10/lib-dynload/ossaudiodev.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (98.7 kB). View file
 
evalkit_tf437/lib/python3.10/lib-dynload/termios.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (66.7 kB). View file
 
evalkit_tf437/lib/python3.10/unittest/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.65 kB). View file
 
evalkit_tf437/lib/python3.10/unittest/__pycache__/loader.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
evalkit_tf437/lib/python3.10/unittest/__pycache__/runner.cpython-310.pyc ADDED
Binary file (7.21 kB). View file
 
evalkit_tf437/lib/python3.10/unittest/suite.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TestSuite"""
2
+
3
+ import sys
4
+
5
+ from . import case
6
+ from . import util
7
+
8
+ __unittest = True
9
+
10
+
11
+ def _call_if_exists(parent, attr):
12
+ func = getattr(parent, attr, lambda: None)
13
+ func()
14
+
15
+
16
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    # When true, each test is dropped from the suite right after it runs so
    # finished tests (and anything they reference) can be garbage-collected.
    _cleanup = True

    def __init__(self, tests=()):
        self._tests = []
        self._removed_tests = 0
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        # Suites compare equal when they are the same kind of suite and
        # hold the same tests in the same order.
        if isinstance(other, self.__class__):
            return list(self) == list(other)
        return NotImplemented

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        """Return the total number of tests, including already-removed ones."""
        total = self._removed_tests
        for entry in self:
            if entry:
                total += entry.countTestCases()
        return total

    def addTest(self, test):
        """Add a single test (case or suite instance) to this suite."""
        # sanity checks
        if not callable(test):
            raise TypeError("{} is not callable".format(repr(test)))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        """Add every test from an iterable; reject a bare string early."""
        if isinstance(tests, str):
            raise TypeError("tests must be an iterable of tests, not a string")
        for entry in tests:
            self.addTest(entry)

    def run(self, result):
        """Run each test against *result*; honor result.shouldStop."""
        for position, test in enumerate(self):
            if result.shouldStop:
                break
            test(result)
            if self._cleanup:
                self._removeTestAtIndex(position)
        return result

    def _removeTestAtIndex(self, index):
        """Stop holding a reference to the TestCase at index."""
        try:
            test = self._tests[index]
        except TypeError:
            # support for suite implementations that have overridden self._tests
            return
        # Some unittest tests add non TestCase/TestSuite objects to the suite;
        # only count the ones that know how to count themselves.
        if hasattr(test, 'countTestCases'):
            self._removed_tests += test.countTestCases()
        self._tests[index] = None

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()
90
+
91
+
92
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result, debug=False):
        # Only the outermost suite (the one that first sets _testRunEntered)
        # performs the final class/module teardown at the end of the run.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for index, test in enumerate(self):
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # A real test case: fire any pending class/module fixture
                # transitions before running it.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class or module level setUp failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue

            if not debug:
                test(result)
            else:
                test.debug()

            if self._cleanup:
                self._removeTestAtIndex(index)

        if topLevel:
            # Passing None forces teardown of whatever class ran last.
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self.run(debug, True)

    ################################

    def _handleClassSetUp(self, test, result):
        # Run setUpClass() once per class transition; record failures on the
        # class (_classSetupFailed) so its remaining tests are skipped.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        failed = False
        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        doClassCleanups = getattr(currentClass, 'doClassCleanups', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    setUpClass()
                except Exception as e:
                    # In debug mode fixture errors propagate immediately.
                    if isinstance(result, _DebugResult):
                        raise
                    failed = True
                    try:
                        currentClass._classSetupFailed = True
                    except TypeError:
                        pass
                    className = util.strclass(currentClass)
                    self._createClassOrModuleLevelException(result, e,
                                                            'setUpClass',
                                                            className)
                if failed and doClassCleanups is not None:
                    # Run class cleanups right away when setUpClass failed,
                    # reporting each cleanup error against setUpClass.
                    doClassCleanups()
                    for exc_info in currentClass.tearDown_exceptions:
                        self._createClassOrModuleLevelException(
                                result, exc_info[1], 'setUpClass', className,
                                info=exc_info)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _get_previous_module(self, result):
        # Module of the class whose tests ran last, or None at the start.
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule


    def _handleModuleFixture(self, test, result):
        # On a module transition: tear down the old module, then run the new
        # module's setUpModule() if it defines one.
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)


        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                try:
                    setUpModule()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    result._moduleSetUpFailed = True
                    self._createClassOrModuleLevelException(result, e,
                                                            'setUpModule',
                                                            currentModule)
                if result._moduleSetUpFailed:
                    # setUpModule failed: run module cleanups now and report
                    # any cleanup errors against setUpModule too.
                    try:
                        case.doModuleCleanups()
                    except Exception as e:
                        self._createClassOrModuleLevelException(result, e,
                                                                'setUpModule',
                                                                currentModule)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _createClassOrModuleLevelException(self, result, exc, method_name,
                                           parent, info=None):
        # Format the synthetic error name, e.g. "setUpClass (pkg.Class)".
        errorName = f'{method_name} ({parent})'
        self._addClassOrModuleLevelException(result, exc, errorName, info)

    def _addClassOrModuleLevelException(self, result, exception, errorName,
                                        info=None):
        # Record a fixture-level failure on the result via an _ErrorHolder;
        # SkipTest exceptions are reported as skips when the result supports it.
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            if not info:
                result.addError(error, sys.exc_info())
            else:
                result.addError(error, info)

    def _handleModuleTearDown(self, result):
        # Run tearDownModule() and module cleanups for the module that just
        # finished; skipped entirely when its setUpModule failed.
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        _call_if_exists(result, '_setupStdout')
        try:
            tearDownModule = getattr(module, 'tearDownModule', None)
            if tearDownModule is not None:
                try:
                    tearDownModule()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    self._createClassOrModuleLevelException(result, e,
                                                            'tearDownModule',
                                                            previousModule)
            # Module cleanups run even when tearDownModule raised.
            try:
                case.doModuleCleanups()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                self._createClassOrModuleLevelException(result, e,
                                                        'tearDownModule',
                                                        previousModule)
        finally:
            _call_if_exists(result, '_restoreStdout')

    def _tearDownPreviousClass(self, test, result):
        # Run tearDownClass()/class cleanups for the previous class when the
        # upcoming *test* belongs to a different class (or test is None).
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass or previousClass is None:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        doClassCleanups = getattr(previousClass, 'doClassCleanups', None)
        if tearDownClass is None and doClassCleanups is None:
            return


        _call_if_exists(result, '_setupStdout')
        try:
            if tearDownClass is not None:
                try:
                    tearDownClass()
                except Exception as e:
                    if isinstance(result, _DebugResult):
                        raise
                    className = util.strclass(previousClass)
                    self._createClassOrModuleLevelException(result, e,
                                                            'tearDownClass',
                                                            className)
            if doClassCleanups is not None:
                # Class cleanups run even when tearDownClass raised; each
                # collected cleanup error is reported individually.
                doClassCleanups()
                for exc_info in previousClass.tearDown_exceptions:
                    if isinstance(result, _DebugResult):
                        raise exc_info[1]
                    className = util.strclass(previousClass)
                    self._createClassOrModuleLevelException(result, exc_info[1],
                                                            'tearDownClass',
                                                            className,
                                                            info=exc_info)
        finally:
            _call_if_exists(result, '_restoreStdout')
326
+
327
+
328
+ class _ErrorHolder(object):
329
+ """
330
+ Placeholder for a TestCase inside a result. As far as a TestResult
331
+ is concerned, this looks exactly like a unit test. Used to insert
332
+ arbitrary errors into a test suite run.
333
+ """
334
+ # Inspired by the ErrorHolder from Twisted:
335
+ # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
336
+
337
+ # attribute used by TestResult._exc_info_to_string
338
+ failureException = None
339
+
340
+ def __init__(self, description):
341
+ self.description = description
342
+
343
+ def id(self):
344
+ return self.description
345
+
346
+ def shortDescription(self):
347
+ return None
348
+
349
+ def __repr__(self):
350
+ return "<ErrorHolder description=%r>" % (self.description,)
351
+
352
+ def __str__(self):
353
+ return self.id()
354
+
355
+ def run(self, result):
356
+ # could call result.addError(...) - but this test-like object
357
+ # shouldn't be run anyway
358
+ pass
359
+
360
+ def __call__(self, result):
361
+ return self.run(result)
362
+
363
+ def countTestCases(self):
364
+ return 0
365
+
366
+ def _isnotsuite(test):
367
+ "A crude way to tell apart testcases and suites with duck-typing"
368
+ try:
369
+ iter(test)
370
+ except TypeError:
371
+ return True
372
+ return False
373
+
374
+
375
class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    # Minimal stand-in for a TestResult: exactly the attributes TestSuite.run()
    # reads while executing in debug mode (no error collection).
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
evalkit_tf437/lib/python3.10/xmlrpc/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # This directory is a Python package.
evalkit_tf437/lib/python3.10/xmlrpc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (385 Bytes). View file
 
evalkit_tf437/lib/python3.10/xmlrpc/__pycache__/client.cpython-310.pyc ADDED
Binary file (34.6 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__init__.py ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/__pycache__/preprocess.cpython-310.pyc ADDED
Binary file (3.74 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_coreml/preprocess.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import hashlib
3
+ import json
4
+ from typing import Dict, Tuple
5
+
6
+ import coremltools as ct # type: ignore[import]
7
+ from coremltools.converters.mil.input_types import TensorType # type: ignore[import]
8
+ from coremltools.converters.mil.mil import types # type: ignore[import]
9
+ from coremltools.models.neural_network import quantization_utils # type: ignore[import]
10
+
11
+ import torch
12
+
13
# Metadata keys under which coremltools records its own version and the
# conversion source; read back from mlmodel.user_defined_metadata in
# preprocess() below.
CT_METADATA_VERSION = "com.github.apple.coremltools.version"
CT_METADATA_SOURCE = "com.github.apple.coremltools.source"
15
+
16
+
17
class ScalarType:
    # Integer codes for tensor element types; used as keys into
    # torch_to_mil_types and as the dtype half of a TensorSpec tuple.
    # NOTE(review): presumably mirrors torch's c10::ScalarType ordering —
    # confirm before extending.
    Float = 0
    Double = 1
    Int = 2
    Long = 3
    Undefined = 4
23
+
24
+
25
# Supported Tensor types in coremltools:
# https://github.com/apple/coremltools/blob/main/coremltools/converters/mil/frontend/torch/converter.py#L28
# Maps the ScalarType codes above to coremltools MIL dtypes; keys not listed
# here (e.g. ScalarType.Undefined) are unsupported and raise KeyError.
torch_to_mil_types = {
    ScalarType.Float: types.fp32,
    ScalarType.Double: types.fp64,
    ScalarType.Int: types.int32,
    ScalarType.Long: types.int64,
}
33
+
34
+
35
class CoreMLComputeUnit:
    # String identifiers for the CoreML compute backend; the chosen value is
    # passed through CompileSpec() and stored in the config dict by preprocess().
    CPU = "cpuOnly"
    CPUAndGPU = "cpuAndGPU"
    ALL = "all"
39
+
40
+
41
class CoreMLQuantizationMode:
    # Weight-quantization modes understood by preprocess(); anything other
    # than NONE triggers 8-bit quantization via quantization_utils.
    LINEAR = "linear"
    LINEAR_SYMMETRIC = "linear_symmetric"
    NONE = "none"
45
+
46
+
47
def TensorSpec(shape, dtype=ScalarType.Float):
    """Describe one tensor as a (shape, dtype) pair for use in CompileSpec."""
    return (shape, dtype)
49
+
50
+
51
def CompileSpec(
    inputs,
    outputs,
    backend=CoreMLComputeUnit.CPU,
    allow_low_precision=True,
    quantization_mode=CoreMLQuantizationMode.NONE,
    mlmodel_export_path=None,
):
    """Bundle CoreML lowering options into the 6-tuple preprocess() unpacks.

    inputs/outputs are sequences of TensorSpec tuples; the remaining
    arguments select the compute backend, precision, optional weight
    quantization, and an optional path for saving the .mlmodel file.
    """
    return (
        inputs,
        outputs,
        backend,
        allow_low_precision,
        quantization_mode,
        mlmodel_export_path,
    )
67
+
68
+
69
+ def _check_enumerated_shape(shape):
70
+ for s in shape:
71
+ if not isinstance(s, (list, tuple)):
72
+ return False
73
+ return True
74
+
75
+
76
def _convert_to_mil_type(shape, dtype, name: str):
    """Build a named coremltools TensorType from a (shape, dtype) spec.

    *dtype* is a ScalarType code looked up in torch_to_mil_types; a shape
    whose entries are all lists/tuples is wrapped in ct.EnumeratedShapes.
    """
    mil_shape = shape
    if _check_enumerated_shape(shape):
        mil_shape = ct.EnumeratedShapes(shape)
    ml_type = TensorType(shape=mil_shape, dtype=torch_to_mil_types[dtype])
    ml_type.name = name
    return ml_type
83
+
84
+
85
def preprocess(script_module: torch._C.ScriptObject, compile_spec: Dict[str, Tuple]):
    """Lower a scripted module to a serialized CoreML model.

    Converts the "forward" entry of *compile_spec* (a CompileSpec tuple)
    into a CoreML model via coremltools, optionally quantizes its weights,
    optionally saves the .mlmodel file, and returns a dict with the
    serialized model bytes, a sha256 hash of them, and a JSON blob of
    input/output/config/metadata needed at runtime.
    """
    spec = compile_spec["forward"]
    (
        input_specs,
        output_specs,
        backend,
        allow_low_precision,
        quantization_mode,
        mlmodel_export_path,
    ) = spec
    mil_inputs = []
    inputs = []
    # Assign positional names input_0, input_1, ... and build the MIL input
    # types that ct.convert needs.
    for index, input in enumerate(input_specs):
        shape, dtype = input
        name = "input_" + str(index)
        inputs.append([name, str(dtype), str(shape)])
        ml_type = _convert_to_mil_type(shape, dtype, name)
        mil_inputs.append(ml_type)
    # Re-wrap the raw ScriptObject as a RecursiveScriptModule for conversion.
    model = torch.jit.RecursiveScriptModule._construct(script_module, lambda x: None)
    mlmodel = ct.convert(model, inputs=mil_inputs)

    if quantization_mode != CoreMLQuantizationMode.NONE:
        # 8-bit weight quantization; the result spec is re-wrapped as MLModel.
        quant_model_spec = quantization_utils.quantize_weights(
            mlmodel, nbits=8, quantization_mode=quantization_mode
        )
        mlmodel = ct.models.MLModel(quant_model_spec)

    spec = mlmodel.get_spec()
    # NOTE(review): assert is stripped under -O; the converted model must
    # produce one output per declared output spec.
    assert len(spec.description.output) == len(output_specs)  # type: ignore[attr-defined]
    outputs = []
    # Output names come from the converted spec, not from the caller.
    for index, output in enumerate(output_specs):
        shape, dtype = output
        name = spec.description.output[index].name  # type: ignore[attr-defined]
        outputs.append([name, str(dtype), str(shape)])
    mlmodel = ct.models.model.MLModel(spec)
    print(mlmodel)

    if mlmodel_export_path is not None:
        print(f"Saving CoreML .mlmodel file to {mlmodel_export_path}")
        mlmodel.save(mlmodel_export_path)

    config = {
        "spec_ver": str(spec.specificationVersion),  # type: ignore[attr-defined]
        "backend": backend,
        "allow_low_precision": str(allow_low_precision),
    }
    metadata = {
        "coremltool_ver": mlmodel.user_defined_metadata[CT_METADATA_VERSION],
        "torch_ver": mlmodel.user_defined_metadata[CT_METADATA_SOURCE],
    }
    coreml_compile_spec = {
        "inputs": inputs,
        "outputs": outputs,
        "config": config,
        "metadata": metadata,
    }
    # From here on, mlmodel holds the serialized protobuf bytes.
    mlmodel = spec.SerializeToString()  # type: ignore[attr-defined]

    return {
        "model": mlmodel,
        "hash": str(hashlib.sha256(mlmodel).hexdigest()),
        "extra": json.dumps(coreml_compile_spec),
    }
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__init__.py ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/prepare.cpython-310.pyc ADDED
Binary file (5.82 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/__pycache__/serializer.cpython-310.pyc ADDED
Binary file (55.8 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/prepare.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from typing import List, Optional
3
+
4
+ import torch
5
+ from torch.backends._nnapi.serializer import _NnapiSerializer
6
+
7
# NNAPI compilation-preference codes, mirroring the ANEURALNETWORKS_PREFER_*
# constants of the Android NeuralNetworks C API; passed through to
# Compilation.init2() by NnapiModule.
ANEURALNETWORKS_PREFER_LOW_POWER = 0
ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1
ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2
10
+
11
+
12
class NnapiModule(torch.nn.Module):
    """Torch Module that wraps an NNAPI Compilation.

    This module handles preparing the weights, initializing the
    NNAPI TorchBind object, and adjusting the memory formats
    of all inputs and outputs.
    """

    # _nnapi.Compilation is defined
    comp: Optional[torch.classes._nnapi.Compilation]  # type: ignore[name-defined]
    weights: List[torch.Tensor]
    out_templates: List[torch.Tensor]

    def __init__(
        self,
        shape_compute_module: torch.nn.Module,
        ser_model: torch.Tensor,
        weights: List[torch.Tensor],
        inp_mem_fmts: List[int],
        out_mem_fmts: List[int],
        compilation_preference: int,
        relax_f32_to_f16: bool,
    ):
        super().__init__()
        self.shape_compute_module = shape_compute_module
        self.ser_model = ser_model
        self.weights = weights
        self.inp_mem_fmts = inp_mem_fmts
        self.out_mem_fmts = out_mem_fmts
        self.out_templates = []
        # Compilation is created lazily on the first forward() call.
        self.comp = None
        self.compilation_preference = compilation_preference
        self.relax_f32_to_f16 = relax_f32_to_f16

    @torch.jit.export
    def init(self, args: List[torch.Tensor]):
        """One-time setup: compute output templates and build the Compilation."""
        assert self.comp is None
        # shape_compute_module.prepare mutates ser_model in place based on the
        # actual arg shapes and returns the output templates.
        self.out_templates = self.shape_compute_module.prepare(self.ser_model, args)  # type: ignore[operator]
        self.weights = [w.contiguous() for w in self.weights]
        comp = torch.classes._nnapi.Compilation()
        comp.init2(
            self.ser_model,
            self.weights,
            self.compilation_preference,
            self.relax_f32_to_f16,
        )

        self.comp = comp

    def forward(self, args: List[torch.Tensor]) -> List[torch.Tensor]:
        if self.comp is None:
            self.init(args)
        comp = self.comp
        assert comp is not None
        # Outputs are allocated from the templates computed in init().
        outs = [torch.empty_like(out) for out in self.out_templates]

        assert len(args) == len(self.inp_mem_fmts)
        fixed_args = []
        for idx in range(len(args)):
            fmt = self.inp_mem_fmts[idx]
            # These constants match the values in DimOrder in serializer.py
            # TODO: See if it's possible to use those directly.
            if fmt == 0:
                fixed_args.append(args[idx].contiguous())
            elif fmt == 1:
                # NCHW -> NHWC layout expected by this input.
                fixed_args.append(args[idx].permute(0, 2, 3, 1).contiguous())
            else:
                raise ValueError("Invalid mem_fmt")
        comp.run(fixed_args, outs)
        assert len(outs) == len(self.out_mem_fmts)
        for idx in range(len(self.out_templates)):
            fmt = self.out_mem_fmts[idx]
            # These constants match the values in DimOrder in serializer.py
            # TODO: See if it's possible to use those directly.
            if fmt in (0, 2):
                pass
            elif fmt == 1:
                # NHWC -> NCHW layout for this output.
                outs[idx] = outs[idx].permute(0, 3, 1, 2)
            else:
                raise ValueError("Invalid mem_fmt")
        return outs
93
+
94
+
95
def convert_model_to_nnapi(
    model,
    inputs,
    serializer=None,
    return_shapes=None,
    use_int16_for_qint16=False,
    compilation_preference=ANEURALNETWORKS_PREFER_SUSTAINED_SPEED,
    relax_f32_to_f16=False,
):
    """Convert a TorchScript model into a scripted NNAPI-backed module.

    Serializes *model* for NNAPI via process_for_nnapi(), wraps the result
    in an NnapiModule, and code-generates a forward() that accepts the
    original positional arguments instead of a single tensor list.
    """
    (
        shape_compute_module,
        ser_model_tensor,
        used_weights,
        inp_mem_fmts,
        out_mem_fmts,
        retval_count,
    ) = process_for_nnapi(
        model, inputs, serializer, return_shapes, use_int16_for_qint16
    )

    nnapi_model = NnapiModule(
        shape_compute_module,
        ser_model_tensor,
        used_weights,
        inp_mem_fmts,
        out_mem_fmts,
        compilation_preference,
        relax_f32_to_f16,
    )

    class NnapiInterfaceWrapper(torch.nn.Module):
        """NNAPI list-ifying and de-list-ifying wrapper.

        NNAPI always expects a list of inputs and provides a list of outputs.
        This module allows us to accept inputs as separate arguments.
        It returns results as either a single tensor or tuple,
        matching the original module.
        """

        def __init__(self, mod):
            super().__init__()
            self.mod = mod

    wrapper_model_py = NnapiInterfaceWrapper(nnapi_model)
    wrapper_model = torch.jit.script(wrapper_model_py)
    # TODO: Maybe make these names match the original.
    arg_list = ", ".join(f"arg_{idx}" for idx in range(len(inputs)))
    # retval_count < 0 signals a single-tensor return; otherwise emit a
    # trailing-comma tuple expression with one element per return value.
    if retval_count < 0:
        ret_expr = "retvals[0]"
    else:
        ret_expr = "".join(f"retvals[{idx}], " for idx in range(retval_count))
    wrapper_model.define(
        f"def forward(self, {arg_list}):\n"
        f"    retvals = self.mod([{arg_list}])\n"
        f"    return {ret_expr}\n"
    )
    return wrapper_model
152
+
153
+
154
def process_for_nnapi(
    model, inputs, serializer=None, return_shapes=None, use_int16_for_qint16=False
):
    """Serialize *model* for NNAPI and build its shape-computation module.

    Returns (shape_compute_module, ser_model_tensor, used_weights,
    inp_mem_fmts, out_mem_fmts, retval_count) — the pieces NnapiModule
    needs at runtime.
    """
    model = torch.jit.freeze(model)

    # Accept a single tensor as shorthand for a one-element input list.
    if isinstance(inputs, torch.Tensor):
        inputs = [inputs]

    serializer = serializer or _NnapiSerializer(
        config=None, use_int16_for_qint16=use_int16_for_qint16
    )
    (
        ser_model,
        used_weights,
        inp_mem_fmts,
        out_mem_fmts,
        shape_compute_lines,
        retval_count,
    ) = serializer.serialize_model(model, inputs, return_shapes)
    # The serialized model is shipped as an int32 tensor.
    ser_model_tensor = torch.tensor(ser_model, dtype=torch.int32)

    # We have to create a new class here every time this function is called
    # because module.define adds a method to the *class*, not the instance.
    class ShapeComputeModule(torch.nn.Module):
        """Code-gen-ed module for tensor shape computation.

        module.prepare will mutate ser_model according to the computed operand
        shapes, based on the shapes of args. Returns a list of output templates.
        """

        pass

    shape_compute_module = torch.jit.script(ShapeComputeModule())
    # Stitch the serializer-emitted body lines under a fixed prepare() header.
    real_shape_compute_lines = [
        "def prepare(self, ser_model: torch.Tensor, args: List[torch.Tensor]) -> List[torch.Tensor]:\n",
    ] + [f"    {line}\n" for line in shape_compute_lines]
    shape_compute_module.define("".join(real_shape_compute_lines))

    return (
        shape_compute_module,
        ser_model_tensor,
        used_weights,
        inp_mem_fmts,
        out_mem_fmts,
        retval_count,
    )
evalkit_tf446/lib/python3.10/site-packages/torch/backends/_nnapi/serializer.py ADDED
@@ -0,0 +1,2229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import array
3
+ import enum
4
+ import functools
5
+ import logging
6
+ import operator
7
+ import struct
8
+ import sys
9
+ from typing import List, NamedTuple, Optional, Tuple
10
+
11
+ import torch
12
+
13
+
14
+ # TODO: Add type annotations
15
+ # TODO: Check tensor types for ops
16
+
17
+
18
+ LOG = logging.getLogger("nnapi_serialize")
19
+
20
+
21
class NNAPI_OperandCode:
    """NNAPI operand type codes (mirror of ANeuralNetworks OperandCode values)."""

    FLOAT32 = 0
    INT32 = 1
    UINT32 = 2
    TENSOR_FLOAT32 = 3
    TENSOR_INT32 = 4
    TENSOR_QUANT8_ASYMM = 5
    BOOL = 6
    TENSOR_QUANT16_SYMM = 7
    TENSOR_FLOAT16 = 8
    TENSOR_BOOL8 = 9
    FLOAT16 = 10
    TENSOR_QUANT8_SYMM_PER_CHANNEL = 11
    TENSOR_QUANT16_ASYMM = 12
35
+
36
+
37
class NNAPI_OperationCode:
    """NNAPI operation codes (mirror of ANeuralNetworks OperationCode values)."""

    ADD = 0
    AVERAGE_POOL_2D = 1
    CONCATENATION = 2
    CONV_2D = 3
    DEPTHWISE_CONV_2D = 4
    DEPTH_TO_SPACE = 5
    DEQUANTIZE = 6
    EMBEDDING_LOOKUP = 7
    FLOOR = 8
    FULLY_CONNECTED = 9
    HASHTABLE_LOOKUP = 10
    L2_NORMALIZATION = 11
    L2_POOL_2D = 12
    LOCAL_RESPONSE_NORMALIZATION = 13
    LOGISTIC = 14
    LSH_PROJECTION = 15
    LSTM = 16
    MAX_POOL_2D = 17
    MUL = 18
    RELU = 19
    RELU1 = 20
    RELU6 = 21
    RESHAPE = 22
    RESIZE_BILINEAR = 23
    RNN = 24
    SOFTMAX = 25
    SPACE_TO_DEPTH = 26
    SVDF = 27
    TANH = 28
    BATCH_TO_SPACE_ND = 29
    DIV = 30
    MEAN = 31
    PAD = 32
    SPACE_TO_BATCH_ND = 33
    SQUEEZE = 34
    STRIDED_SLICE = 35
    SUB = 36
    TRANSPOSE = 37
    ABS = 38
    ARGMAX = 39
    ARGMIN = 40
    AXIS_ALIGNED_BBOX_TRANSFORM = 41
    BIDIRECTIONAL_SEQUENCE_LSTM = 42
    BIDIRECTIONAL_SEQUENCE_RNN = 43
    BOX_WITH_NMS_LIMIT = 44
    CAST = 45
    CHANNEL_SHUFFLE = 46
    DETECTION_POSTPROCESSING = 47
    EQUAL = 48
    EXP = 49
    EXPAND_DIMS = 50
    GATHER = 51
    GENERATE_PROPOSALS = 52
    GREATER = 53
    GREATER_EQUAL = 54
    GROUPED_CONV_2D = 55
    HEATMAP_MAX_KEYPOINT = 56
    INSTANCE_NORMALIZATION = 57
    LESS = 58
    LESS_EQUAL = 59
    LOG = 60
    LOGICAL_AND = 61
    LOGICAL_NOT = 62
    LOGICAL_OR = 63
    LOG_SOFTMAX = 64
    MAXIMUM = 65
    MINIMUM = 66
    NEG = 67
    NOT_EQUAL = 68
    PAD_V2 = 69
    POW = 70
    PRELU = 71
    QUANTIZE = 72
    QUANTIZED_16BIT_LSTM = 73
    RANDOM_MULTINOMIAL = 74
    REDUCE_ALL = 75
    REDUCE_ANY = 76
    REDUCE_MAX = 77
    REDUCE_MIN = 78
    REDUCE_PROD = 79
    REDUCE_SUM = 80
    ROI_ALIGN = 81
    ROI_POOLING = 82
    RSQRT = 83
    SELECT = 84
    SIN = 85
    SLICE = 86
    SPLIT = 87
    SQRT = 88
    TILE = 89
    TOPK_V2 = 90
    TRANSPOSE_CONV_2D = 91
    UNIDIRECTIONAL_SEQUENCE_LSTM = 92
    UNIDIRECTIONAL_SEQUENCE_RNN = 93
    RESIZE_NEAREST_NEIGHBOR = 94
133
+
134
+
135
class NNAPI_FuseCode:
    """NNAPI fused-activation codes attached to ops like CONV_2D/ADD."""

    FUSED_NONE = 0
    FUSED_RELU = 1
    FUSED_RELU1 = 2
    FUSED_RELU6 = 3
140
+
141
+
142
class OperandValueSourceType:
    """Where an operand's value bytes come from in the serialized model.

    NOTE(review): 1 is skipped here — presumably reserved by the consuming
    runtime; confirm against the C++ deserializer.
    """

    IMMEDIATE = 0
    NUMBERED_BUFFER = 2
    NUMBERED_MEMORY = 3
146
+
147
+
148
+ # Scalar types that appear explicitly in models.
149
+ # These must be kept in sync with
150
+ # AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
151
+ # TODO: Expose these directly to Python to avoid maintaining this list.
152
class TorchScalarTypes(enum.Enum):
    """Torch scalar-type codes that appear explicitly in serialized models."""

    QUINT8 = 13
154
+
155
+
156
def approx_equal(lhs, rhs, tolerance=1e-6):
    """Return True if lhs and rhs differ by at most tolerance * min(lhs, rhs).

    NOTE(review): the bound scales with min(lhs, rhs), so this is only
    meaningful for positive operands (e.g. quantization scales) — confirm
    at callsites.
    """
    bound = tolerance * min(lhs, rhs)
    return abs(lhs - rhs) <= bound
158
+
159
+
160
def tensor_size(op_type, dims):
    """Return the size in bytes of a tensor operand of type *op_type* and shape *dims*."""
    # Bytes per element for each supported tensor operand type.
    ITEM_SIZES = {
        NNAPI_OperandCode.TENSOR_FLOAT32: 4,
        NNAPI_OperandCode.TENSOR_INT32: 4,
        NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: 1,
        NNAPI_OperandCode.TENSOR_QUANT16_SYMM: 2,
        NNAPI_OperandCode.TENSOR_QUANT16_ASYMM: 2,
    }
    return functools.reduce(operator.mul, dims, ITEM_SIZES[op_type])
172
+
173
+
174
def change_element(tup, index, value):
    """Return a copy of sequence *tup*, as a tuple, with position *index* set to *value*."""
    items = list(tup)
    items[index] = value
    return tuple(items)
178
+
179
+
180
class ConvPoolArgs2d(NamedTuple):
    """Configuration arguments for a 2d convolution or pooling op."""

    # Kernel (window) size.
    kernel_h: int
    kernel_w: int
    # Strides.
    stride_h: int
    stride_w: int
    # Explicit padding: top, bottom, left, right.
    pad_t: int
    pad_b: int
    pad_l: int
    pad_r: int
    # Dilation (only 1 is currently supported downstream).
    dilation_h: int
    dilation_w: int
    # Group count for grouped/depthwise convolution.
    group: int
194
+
195
+
196
class DimOrder(enum.Enum):
    """How an operand's NNAPI physical layout relates to its PyTorch shape."""

    # NNAPI shape exactly matches the PyTorch (NCHW) shape.
    PRESUMED_CONTIGUOUS = 0
    # PyTorch tensor is NCHW; NNAPI operand is stored as NHWC.
    CHANNELS_LAST = 1
    # Rank 0 or 1 operand; no layout question arises.
    SCALAR_OR_VECTOR = 2
    # Constant whose layout hasn't been decided yet.
    UNKNOWN_CONSTANT = 999
201
+
202
+
203
class Operand(NamedTuple):
    """Representation of an NNAPI operand."""

    # NNAPI operand type. One of NNAPI_OperandCode.
    # TODO: Make this an enum.
    op_type: int

    # This is always the PyTorch shape, which is NCHW for feature maps.
    # The actual NNAPI operand might have a transposed shape.
    # we use 0 for load time dynamic shapes & -1 for runtime dynamic shapes
    shape: Tuple[int, ...]

    # Specifies how the shape of the operand that we define in NNAPI
    # relates to the shape we track above.
    # - PRESUMED_CONTIGUOUS: physical NNAPI operand will exactly match
    #   the shape of the PyTorch tensor.
    # - CHANNELS_LAST: The PyTorch tensor is expected to be NCHW, and
    #   the NNAPI operand will be represented explicitly as NHWC.
    dim_order: DimOrder

    # Quantization params
    scale: float
    zero_point: int

    def use_nchw(self) -> bool:
        """Return True if the NNAPI op consuming this operand should use NCHW layout."""
        if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS:
            return True
        if self.dim_order is DimOrder.CHANNELS_LAST:
            return False
        # SCALAR_OR_VECTOR / UNKNOWN_CONSTANT have no NCHW/NHWC interpretation.
        raise Exception("Unknown dim order")  # noqa: TRY002
233
+
234
+
235
def broadcast_shapes(shape1, shape2):
    """Compute the elementwise-broadcast result of two equal-rank shapes."""
    assert len(shape1) > 0
    assert len(shape2) > 0
    s1 = list(shape1)
    s2 = list(shape2)
    # TODO: Support non-equal-rank broadcast where semantics match.
    # This can be tricky for NHWC tensors because dimension orders
    # don't match between PT and NNAPI, even though semantics match.
    if len(s1) != len(s2):
        raise Exception(  # noqa: TRY002
            "Non-equal-rank broadcast is not supported yet."
        )
    out = []
    for d1, d2 in zip(s1, s2):
        if d1 == d2 or d2 == 1:
            out.append(d1)
        elif d1 == 1:
            out.append(d2)
        else:
            raise Exception(  # noqa: TRY002
                f"Cannot broadcast shapes: {shape1} and {shape2}"
            )
    return tuple(out)
266
+
267
+
268
def get_conv_pool_shape(image_shape, args, out_ch, transpose):
    """Compute the NCHW output shape of a 2d conv/pool (or transposed conv).

    Args:
        image_shape: input shape as (batch, channels, height, width).
        args: ConvPoolArgs2d-like object (kernel/stride/pad/dilation fields).
        out_ch: number of output channels.
        transpose: use transposed-convolution output arithmetic.

    Returns:
        (batch, out_ch, out_h, out_w); a flexible (size-0) input H or W
        propagates as 0 in the output.
    """
    batch, in_c, in_h, in_w = image_shape

    # TODO: Handle dilation
    if args.dilation_h != 1 or args.dilation_w != 1:
        raise Exception("Dilation not supported yet.")  # noqa: TRY002

    if transpose:
        out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b
        # BUGFIX: this previously subtracted pad_l twice and never used pad_r,
        # producing a wrong width for asymmetric padding.
        out_w = (in_w - 1) * args.stride_w + args.kernel_w - args.pad_l - args.pad_r
    else:
        out_h = (in_h - args.kernel_h + args.pad_t + args.pad_b) // args.stride_h + 1
        out_w = (in_w - args.kernel_w + args.pad_l + args.pad_r) // args.stride_w + 1

    # Handle variable-sized tensors: size-0 dims are load-time flexible.
    if in_h == 0:
        out_h = 0
    if in_w == 0:
        out_w = 0

    out_shape = (batch, out_ch, out_h, out_w)
    return out_shape
290
+
291
+
292
def fix_shape(shape, dim_order):
    """Return the physical NNAPI shape for a PyTorch shape and dim order.

    This is where we convert from PyTorch's "always NCHW" shape to an
    explicit NHWC shape when the operand is channels-last.
    """
    if dim_order is DimOrder.CHANNELS_LAST:
        # NCHW -> NHWC: move the channel dimension to the end.
        return (shape[0], *shape[2:], shape[1])
    if dim_order is DimOrder.SCALAR_OR_VECTOR:
        assert len(shape) == 0 or len(shape) == 1
        return shape
    if dim_order in (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.UNKNOWN_CONSTANT):
        # UNKNOWN_CONSTANT: XXX think this through
        return shape
    raise Exception(f"Bad dim_order: {dim_order!r}.")  # noqa: TRY002
307
+
308
+
309
def reverse_map_dim(dim_order, d):
    """Map NNAPI dimension index *d* back to its PyTorch dimension index.

    reverse_map_dim(PRESUMED_CONTIGUOUS, x) == x
    reverse_map_dim(CHANNELS_LAST, 3) == 1
    """
    # Contiguous and scalar/vector layouts are identity mappings.
    if dim_order in (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.SCALAR_OR_VECTOR):
        return d
    assert dim_order is DimOrder.CHANNELS_LAST
    # NHWC position -> NCHW position.
    return (0, 2, 3, 1)[d]
318
+
319
+
320
def flex_name(op_id, dim):
    """Name of the generated local holding the flexible size of (op_id, dim)."""
    return "s_{}_{}".format(op_id, dim)
324
+
325
+
326
+ class _NnapiSerializer:
327
    def __init__(self, config, use_int16_for_qint16=False):
        """Stateful serializer that lowers a TorchScript graph to NNAPI.

        config: normalized to {} when None but not otherwise read here —
            presumably reserved for future options; confirm with callers.
        use_int16_for_qint16: treat torch.int16 tensors as NNAPI qint16.
        """
        # Parallel model tables, flattened in serialize_model().
        self.operands = []  # list of Operand
        self.values = []  # (operand_id, OperandValueSourceType) pairs
        self.operations = []  # (opcode, num_inputs, num_outputs) triples
        self.value_data = []  # raw bytes for each entry of self.values
        self.operation_args = []  # flattened operation input/output operand ids
        self.inputs = []  # model input operand ids
        self.outputs = []  # model output operand ids
        # TorchScript source lines emitted for load-time shape computation.
        self.flexible_shape_computation_lines = []

        self.modules = {}
        self.constants = {}  # jitval -> (ctype, value)
        self.tensor_sequences = {}  # jitval -> list of tensor jitvals
        self.jitval_operand_map = {}  # jitval -> operand id
        self.cached_immediates = {}  # (code, value_bytes) -> operand id
        self.used_weights = []  # tensors referenced by NUMBERED_BUFFER values
        self.weight_offset = 0
        self.use_int16_for_qint16 = use_int16_for_qint16

        # NOTE(review): config is unused after this normalization.
        if config is None:
            config = {}
348
+
349
    def get_next_operand_id(self):
        """Return the id the next registered operand will receive."""
        return len(self.operands)
351
+
352
+ # Add a tensor operand corresponding to a JIT Value.
353
+ # Returns the NNAPI operand ID. Can be looked up later with
354
+ # get_tensor_operand_by_jitval.
355
+ def add_tensor_operand(self, jitval, oper):
356
+ assert isinstance(oper, Operand)
357
+ if jitval in self.jitval_operand_map:
358
+ raise Exception(f"Duplicate tensor: {jitval!r}") # noqa: TRY002
359
+
360
+ operand_id = self.get_next_operand_id()
361
+ self.operands.append(oper)
362
+ self.jitval_operand_map[jitval] = operand_id
363
+ return operand_id
364
+
365
+ # Add a tensor operand that does not correspond to a JIT Value.
366
+ # Useful for cases where multiple NNAPI operands are required
367
+ # to implement one JIT IR node. Returns the NNAPI operand ID.
368
+ def add_anonymous_tensor_operand(self, oper):
369
+ assert isinstance(oper, Operand)
370
+ operand_id = self.get_next_operand_id()
371
+ self.operands.append(oper)
372
+ return operand_id
373
+
374
    def torch_tensor_to_operand(self, tensor, dim_order):
        """Build an Operand describing *tensor* with the given dim order.

        Maps torch dtypes to NNAPI operand codes and pulls quantization
        params from quantized tensors. Raises for unsupported dtypes.
        """
        dtype = str(tensor.dtype).replace("torch.", "")
        scale = 0.0
        zero_point = 0
        if dtype == "float32":
            op_type = NNAPI_OperandCode.TENSOR_FLOAT32
        elif dtype == "int32":
            op_type = NNAPI_OperandCode.TENSOR_INT32
        elif dtype == "quint8":
            op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM
            scale = tensor.q_scale()
            zero_point = tensor.q_zero_point()
        elif dtype == "qint32":
            op_type = NNAPI_OperandCode.TENSOR_INT32
            scale = tensor.q_scale()
            zero_point = tensor.q_zero_point()
            # NNAPI int32 tensors carry no zero point; require symmetric quant.
            assert zero_point == 0
        elif dtype == "int16":
            if self.use_int16_for_qint16:
                # Callers tag the tensor with nnapi_dtype/nnapi_scale/
                # nnapi_zero_point attributes to describe the qint16 operand.
                nnapi_dtype = getattr(tensor, "nnapi_dtype", None)
                op_codes = (
                    NNAPI_OperandCode.TENSOR_QUANT16_SYMM,
                    NNAPI_OperandCode.TENSOR_QUANT16_ASYMM,
                )
                if nnapi_dtype in op_codes:
                    op_type = nnapi_dtype
                    scale = tensor.nnapi_scale
                    zero_point = tensor.nnapi_zero_point
                else:
                    # NOTE(review): message says `nnapi_type` but the attribute
                    # read above is `nnapi_dtype`.
                    raise Exception(  # noqa: TRY002
                        f"`nnapi_type` needs to be one of {op_codes} for `int16`"
                    )
            else:
                raise Exception(  # noqa: TRY002
                    "`int16` isn't supported. If you're trying to represent NNAPI"
                    " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`"
                )
        else:
            raise Exception(  # noqa: TRY002
                f"Can't handle input with dtype '{tensor.dtype}'"
            )
        return Operand(
            shape=tuple(tensor.shape),
            op_type=op_type,
            dim_order=dim_order,
            scale=scale,
            zero_point=zero_point,
        )
422
+
423
    def add_tensor_operand_for_input(self, arg_idx, jitval, tensor):
        """Register a model input operand for argument *arg_idx*.

        A tensor tagged with `nnapi_nhwc` becomes a CHANNELS_LAST operand.
        Size-0 dims are flexible: emit code to read their actual size from
        the runtime args. Returns the operand id.
        """
        dim_order = (
            DimOrder.CHANNELS_LAST
            if getattr(tensor, "nnapi_nhwc", False)
            else DimOrder.PRESUMED_CONTIGUOUS
        )
        toper = self.torch_tensor_to_operand(tensor, dim_order)
        operand_id = self.add_tensor_operand(jitval, toper)
        self.inputs.append(operand_id)
        for dim, size in enumerate(tensor.shape):
            if size == 0:
                self.compute_operand_shape(
                    operand_id, dim, f"args[{arg_idx}].shape[{dim}]"
                )
        return operand_id
438
+
439
+ def add_tensor_operand_for_weight(
440
+ self, tensor, dim_order=DimOrder.UNKNOWN_CONSTANT
441
+ ):
442
+ toper = self.torch_tensor_to_operand(tensor, dim_order)
443
+ operand_id = len(self.operands)
444
+ self.operands.append(toper)
445
+ tsize = tensor_size(toper.op_type, toper.shape)
446
+ psize = ((tsize - 1) | 0x3) + 1
447
+ self.values.append((operand_id, OperandValueSourceType.NUMBERED_BUFFER))
448
+ buf_num = len(self.used_weights)
449
+ offset = 0
450
+ self.value_data.append(struct.pack("iii", buf_num, offset, tsize))
451
+ # For NHWC NNAPI op, lay out data in the same dim order by permuting torch tensor
452
+ if dim_order == DimOrder.CHANNELS_LAST:
453
+ tensor = tensor.permute(0, 2, 3, 1)
454
+ self.used_weights.append(tensor)
455
+ return operand_id
456
+
457
+ def add_immediate_operand(self, code, value, dims):
458
+ assert isinstance(dims, tuple)
459
+ cache_key = (code, value)
460
+ if cache_key not in self.cached_immediates:
461
+ operand_id = len(self.operands)
462
+ self.operands.append(Operand(code, dims, DimOrder.SCALAR_OR_VECTOR, 0.0, 0))
463
+ self.values.append((operand_id, OperandValueSourceType.IMMEDIATE))
464
+ self.value_data.append(value)
465
+ self.cached_immediates[cache_key] = operand_id
466
+ return self.cached_immediates[cache_key]
467
+
468
    def add_immediate_int_scalar(self, value):
        """Add (or reuse) an immediate scalar int32 operand."""
        return self.add_immediate_operand(
            NNAPI_OperandCode.INT32, struct.pack("i", value), ()
        )
472
+
473
    def add_immediate_float_scalar(self, value):
        """Add (or reuse) an immediate scalar float32 operand."""
        return self.add_immediate_operand(
            NNAPI_OperandCode.FLOAT32, struct.pack("f", value), ()
        )
477
+
478
    def add_immediate_bool_scalar(self, value):
        """Add (or reuse) an immediate scalar bool operand (one byte)."""
        return self.add_immediate_operand(
            NNAPI_OperandCode.BOOL, b"\x01" if value else b"\x00", ()
        )
482
+
483
    def add_immediate_int_vector(self, value):
        """Add (or reuse) an immediate 1-d int32 tensor operand."""
        return self.add_immediate_operand(
            NNAPI_OperandCode.TENSOR_INT32,
            array.array("i", value).tobytes(),
            (len(value),),
        )
489
+
490
    def has_operand_for_jitval(self, jitval):
        """Return True if *jitval* already has a registered NNAPI operand."""
        return jitval in self.jitval_operand_map
492
+
493
    def get_tensor_operand_by_jitval(self, jitval):
        """Return (operand_id, Operand) for *jitval*; KeyError if unregistered."""
        operand_id = self.jitval_operand_map[jitval]
        return (operand_id, self.operands[operand_id])
496
+
497
    def get_tensor_operand_by_jitval_fixed_size(self, jitval):
        """Like get_tensor_operand_by_jitval, but reject load-time flex dims.

        Size-0 (load-time flexible) dims raise; size<0 (runtime flexible)
        dims only log a warning.
        """
        op_id, oper = self.get_tensor_operand_by_jitval(jitval)
        for s in oper.shape:
            if s == 0:
                # TODO: Improve this error message, possibly after converting
                # many callsites to support flexible size.
                raise Exception(  # noqa: TRY002
                    "Flexible size is not supported for this operand."
                )
            if s < 0:
                # runtime flex
                LOG.warning("Operand %s has runtime flex shape", oper)
        return op_id, oper
510
+
511
+ def get_tensor_operand_or_constant(
512
+ self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS
513
+ ):
514
+ operand_id = self.jitval_operand_map.get(jitval)
515
+ if operand_id is None:
516
+ _, value = self.get_constant_value(jitval, "TensorType")
517
+ operand_id = self.add_tensor_operand_for_weight(value, dim_order)
518
+ return (operand_id, self.operands[operand_id])
519
+
520
    def get_tensor_operand_for_weight(self, jitval):
        """Materialize the constant tensor behind *jitval* as a weight operand."""
        _, value = self.get_constant_value(jitval, "TensorType")
        operand_id = self.add_tensor_operand_for_weight(value)
        return (operand_id, self.operands[operand_id])
524
+
525
+ def add_operation(self, opcode, inputs, outputs):
526
+ self.operations.append((opcode, len(inputs), len(outputs)))
527
+ self.operation_args.extend(inputs + outputs)
528
+
529
    def add_tensor_sequence(self, jitval, values):
        """Record the member jitvals of a list/tuple jitval."""
        assert jitval not in self.tensor_sequences
        self.tensor_sequences[jitval] = values
532
+
533
    def add_constant_value(self, jitval, ctype, value):
        """Record a compile-time constant (type, value) for *jitval*."""
        assert jitval not in self.constants
        self.constants[jitval] = (ctype, value)
536
+
537
    def get_constant_value(self, jitval, typekind=None):
        """Return the recorded (ctype, value) for *jitval*.

        If *typekind* is given, also verify the constant's JIT type kind.
        Raises on missing constants or kind mismatch.
        """
        record = self.constants.get(jitval)
        if record is None:
            raise Exception(  # noqa: TRY002
                f"Could not find constant value for '{jitval!r}'."
            )
        ctype, _ = record
        if typekind is not None and ctype.kind() != typekind:
            raise Exception(  # noqa: TRY002
                f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'"
            )
        return record
549
+
550
    def operand_to_template_torchscript(self, op_id, oper, shape=None):
        """Return a TorchScript expression to build a template for a given operand."""
        if shape is None:
            shape = oper.shape
        else:
            assert len(shape) == len(oper.shape)

        # Build the shape tuple source, e.g. "(1,s_3_1,4,)".
        shape_parts = ["("]
        for d, s in enumerate(shape):
            if s > 0:
                # Fixed shape dimension: just add the value.
                shape_parts.append(str(s))
            elif s == 0:
                # Load time flexible shape dimension: it should have been computed in a variable.
                shape_parts.append(flex_name(op_id, d))
            elif s == -1:
                # Runtime flexible shape
                shape_parts.append("0")
            else:
                raise Exception(  # noqa: TRY002
                    "Unknown dim value, dimensions should be >= -1"
                )
            shape_parts.append(",")
        shape_parts.append(")")
        shape_code = "".join(shape_parts)
        if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:
            return f"torch.zeros({shape_code}, dtype=torch.float32)"
        elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32:
            return f"torch.zeros({shape_code}, dtype=torch.int32)"
        elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
            # Quantized zeros: quantize a single element, then expand to shape.
            return (
                f"torch.quantize_per_tensor("
                f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)"
                f".expand({shape_code}).contiguous()"
            )
        elif oper.op_type in (
            NNAPI_OperandCode.TENSOR_QUANT16_ASYMM,
            NNAPI_OperandCode.TENSOR_QUANT16_SYMM,
        ):
            if self.use_int16_for_qint16:
                return f"torch.zeros({shape_code}, dtype=torch.int16)"
            else:
                raise Exception(  # noqa: TRY002
                    "`int16` isn't supported. If you're trying to represent NNAPI"
                    " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`"
                )

        raise Exception(  # noqa: TRY002
            f"Unsupported output operand type: {oper.op_type}"
        )
600
+
601
    def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim):
        """Make an output's flexible dim equal to an input's flexible dim."""
        self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim))
603
+
604
    def compute_operand_shape(self, op_id, dim, expr):
        """Emit a codegen line assigning TorchScript *expr* to the flex-dim variable."""
        self.flexible_shape_computation_lines.append(
            f"{flex_name(op_id, dim)} = {expr}"
        )
608
+
609
    def transpose_to_nhwc(self, in_id, oper):
        """Insert a TRANSPOSE op converting operand *in_id* to channels-last.

        Only supported when H == W == 1. Returns (new_operand_id, new_oper).
        """
        if oper.shape[2:] != (1, 1):
            raise Exception(  # noqa: TRY002
                "Automatic transpose only supported for H,W == 1,1"
            )

        out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST)

        inputs = [None] * 2
        inputs[0] = in_id
        # NCHW -> NHWC permutation.
        inputs[1] = self.add_immediate_int_vector([0, 2, 3, 1])

        outputs = [None] * 1
        outputs[0] = self.add_anonymous_tensor_operand(out_oper)

        self.add_operation(NNAPI_OperationCode.TRANSPOSE, inputs, outputs)

        return outputs[0], out_oper
627
+
628
+ # Transpose inputs as necessary to allow broadcasting.
629
    def transpose_for_broadcast(self, in0_id, in0_oper, in1_id, in1_oper):
        """Transpose inputs as necessary to give both the same dim order.

        Returns (in0_id, in0_oper, in1_id, in1_oper), possibly with one
        input replaced by a transposed copy.
        """
        if in0_oper.dim_order == in1_oper.dim_order:
            return in0_id, in0_oper, in1_id, in1_oper

        # Assume NHWC is preferred if there is a mismatch.
        orders = (in0_oper.dim_order, in1_oper.dim_order)
        if orders == (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.CHANNELS_LAST):
            return self.transpose_to_nhwc(in0_id, in0_oper) + (in1_id, in1_oper)
        if orders == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS):
            return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper)

        raise Exception(  # noqa: TRY002
            f"Automatic transpose not supported for dim_orders: {in0_oper.dim_order!r}, {in1_oper.dim_order!r}"
        )
643
+
644
    def get_size_arg(self, jitval):
        """Return a constant int-list arg (e.g. stride/padding) for *jitval*."""
        ctype, value = self.get_constant_value(jitval)
        if ctype.kind() == "ListType":
            assert ctype.getElementType().kind() == "IntType"
            return value
        raise Exception(  # noqa: TRY002
            f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'"
        )
652
+
653
    def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config):
        """Build ConvPoolArgs2d from a packed quantized-conv config tensor.

        Layout (as read below): [spatial_rank(=2), stride_h, stride_w,
        pad_h, pad_w, dilation_h, dilation_w, out_pad_h, out_pad_w,
        groups, <1 trailing entry>]; output padding must be zero.
        """
        pc = [i.item() for i in packed_config]
        assert pc[0] == 2  # only 2d convs are supported
        strides = [pc[1], pc[2]]
        paddings = [pc[3], pc[4]]
        dilations = [pc[5], pc[6]]
        output_padding = [pc[7], pc[8]]
        group_num = pc[9]

        assert len(pc) == 11
        assert output_padding == [0, 0]

        return self.get_conv_pool_args_2d_common(
            kernel_size, strides, paddings, dilations, group_num
        )
668
+
669
    def get_conv_pool_args_2d_from_jit(
        self, kernel_size, stride, padding, dilation=None, group=None
    ):
        """Build ConvPoolArgs2d from JIT constant args.

        stride/padding/dilation are jitvals holding int lists; group is a
        jitval holding an int. Missing dilation defaults to [1, 1].
        """
        strides = self.get_size_arg(stride)
        paddings = self.get_size_arg(padding)
        if dilation is None:
            dilations = [1, 1]
        else:
            dilations = self.get_size_arg(dilation)
        if group is not None:
            _, group_num = self.get_constant_value(group, "IntType")
        else:
            group_num = None
        return self.get_conv_pool_args_2d_common(
            kernel_size, strides, paddings, dilations, group_num
        )
685
+
686
    def get_conv_pool_args_2d_common(
        self, kernel_size, strides, paddings, dilations, group_num
    ):
        """Normalize 2-element kernel/stride/pad/dilation lists into ConvPoolArgs2d."""
        kernels = list(kernel_size)

        assert len(kernels) == 2
        assert len(strides) == 2
        assert len(paddings) == 2
        assert len(dilations) == 2

        # NNAPI uses 4 values for padding: symmetric (top==bottom, left==right).
        ph, pw = paddings
        real_paddings = [ph, ph, pw, pw]

        return ConvPoolArgs2d(
            *(kernels + strides + real_paddings + dilations + [group_num])
        )
703
+
704
    def serialize_model(self, model, inputs, return_shapes=None):
        """Lower a frozen TorchScript module to the flat NNAPI model format.

        Walks the graph, registering operands/operations, then packs the
        tables into a single int32 word stream. Returns
        (model_words, used_weights, inp_dim_orders, out_dim_orders,
         flexible_shape_computation_lines, retval_count) where retval_count
        is -1 for a single-tensor return and the tuple length otherwise.
        """
        # Pre-register the shared bool immediates so they get stable ids.
        self.add_immediate_bool_scalar(False)
        self.add_immediate_bool_scalar(True)

        inp_dim_orders = []
        out_dim_orders = []

        # First graph input is `self`; record the module as a constant.
        self_jitval = next(model.graph.inputs())
        self.add_constant_value(self_jitval, self_jitval.type(), model)

        # Register one input operand per (graph input, example tensor) pair.
        for arg_idx, (input_value, input_tensor) in enumerate(
            zip(list(model.graph.inputs())[1:], inputs)
        ):
            op_id = self.add_tensor_operand_for_input(
                arg_idx, input_value, input_tensor
            )
            inp_dim_orders.append(self.operands[op_id].dim_order.value)

        # Lower every node via the ADDER_MAP dispatch.
        for idx, node in enumerate(model.graph.nodes()):
            LOG.debug("Processing node #%d: %r", idx, node)
            self.add_node(node)

        retn = model.graph.return_node()
        assert retn.inputsSize() == 1
        assert retn.outputsSize() == 0
        retn_input = retn.inputsAt(0)
        template_return_lines = ["return ["]
        if retn_input.type().kind() == "TensorType":
            return_values = [retn_input]
            # -1 signals "single tensor, not a tuple" to the wrapper codegen.
            retval_count = -1
        elif retn_input.type().kind() == "TupleType":
            return_values = self.tensor_sequences[retn_input]
            retval_count = len(return_values)
        else:
            raise Exception(  # noqa: TRY002
                f"Unsupported return type: {retn_input.type()}"
            )

        if return_shapes is not None:
            assert len(return_shapes) == len(return_values)
        for i, v in enumerate(return_values):
            op_id = self.jitval_operand_map[v]
            self.outputs.append(op_id)
            out_dim_orders.append(self.operands[op_id].dim_order.value)
            shape = return_shapes[i] if return_shapes else None
            template_return_lines.append(
                self.operand_to_template_torchscript(op_id, self.operands[op_id], shape)
                + ","
            )
        template_return_lines.append("]")

        # From here on, `model` is the list of byte chunks being assembled.
        model = []

        version = 1
        header = struct.pack(
            "iiiiii",
            version,
            len(self.operands),
            len(self.values),
            len(self.operations),
            len(self.inputs),
            len(self.outputs),
        )
        model.append(header)

        serialized_values, serialized_value_data = self.serialize_values()

        # Operand headers: (op_type, rank, scale, zero_point).
        model.extend(
            struct.pack("iifi", t, len(d), s, z) for (t, d, _m, s, z) in self.operands
        )
        model.extend(serialized_values)
        model.extend(struct.pack("iii", *x) for x in self.operations)

        # Compact the model so we can get its length so far.
        model = [b"".join(model)]
        model_offset = len(model[0])
        # Model offset is the index into the model (in 32-bit words, not bytes)
        # of the next dimension we're about to serialize.  If it's 0,
        # generate code to mutate it before passing to NNAPI.
        assert model_offset % 4 == 0
        model_offset = int(model_offset / 4)

        for op_id, (_, dims, dim_order, _, _) in enumerate(self.operands):
            shape = fix_shape(dims, dim_order)
            for d, s in enumerate(shape):
                if s == 0:
                    # Patch this word at load time with the computed size.
                    pt_d = reverse_map_dim(dim_order, d)
                    self.flexible_shape_computation_lines.append(
                        f"ser_model[{model_offset}] = {flex_name(op_id, pt_d)}"
                    )
                model_offset += 1

            # convert runtime flex shape from -1 to 0
            shape = tuple(d if d != -1 else 0 for d in shape)
            model.append(self.serialize_ints(shape))

        model.extend(serialized_value_data)
        model.append(self.serialize_ints(self.operation_args))
        model.append(self.serialize_ints(self.inputs))
        model.append(self.serialize_ints(self.outputs))

        self.flexible_shape_computation_lines.extend(template_return_lines)

        return (
            array.array("i", b"".join(model)),
            self.used_weights,
            inp_dim_orders,
            out_dim_orders,
            self.flexible_shape_computation_lines,
            retval_count,
        )
815
+
816
+ def serialize_values(self):
817
+ serialized_values = []
818
+ serialized_value_data = []
819
+ assert len(self.values) == len(self.value_data)
820
+ for (op_index, source_type), data in zip(self.values, self.value_data):
821
+ source_length = len(data)
822
+
823
+ # Pad with 0 bytes out to a multiple of 4 for alignment.
824
+ physical_length = ((source_length - 1) | 0x3) + 1
825
+ padded_data = data + (b"\0" * (physical_length - source_length))
826
+
827
+ serialized_values.append(
828
+ struct.pack("iii", op_index, source_type, source_length)
829
+ )
830
+ serialized_value_data.append(padded_data)
831
+
832
+ return serialized_values, serialized_value_data
833
+
834
    @staticmethod
    def serialize_ints(ints):
        """Pack an iterable of ints as native-endian 32-bit words."""
        return array.array("i", ints).tobytes()
837
+
838
+ ADDER_MAP = {
839
+ "prim::GetAttr": lambda self, node: self.add_getattr(node),
840
+ "prim::Constant": lambda self, node: self.add_constant_node(node),
841
+ "prim::ListConstruct": lambda self, node: self.add_list_construct(node),
842
+ "prim::TupleConstruct": lambda self, node: self.add_tuple_construct(node),
843
+ "aten::unsqueeze": lambda self, node: self.add_unsqueeze(node),
844
+ "aten::to": lambda self, node: self.add_to(node),
845
+ "aten::detach": lambda self, node: self._identity(node),
846
+ "aten::reshape": lambda self, node: self.add_reshape(node),
847
+ "aten::flatten": lambda self, node: self.add_flatten(node),
848
+ "aten::slice": lambda self, node: self.add_slice(node),
849
+ "aten::size": lambda self, node: self.add_size(node),
850
+ "aten::cat": lambda self, node: self.add_cat(node),
851
+ "aten::mean": lambda self, node: self.add_mean(node),
852
+ "aten::quantize_per_tensor": lambda self, node: self.add_quantize(node),
853
+ "aten::dequantize": lambda self, node: self.add_dequantize(node),
854
+ "aten::add": lambda self, node: self.add_add_sub_op(
855
+ node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE
856
+ ),
857
+ "aten::sub": lambda self, node: self.add_add_sub_op(
858
+ node, NNAPI_OperationCode.SUB, NNAPI_FuseCode.FUSED_NONE
859
+ ),
860
+ "aten::mul": lambda self, node: self.add_pointwise_simple_binary_broadcast_op(
861
+ node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE
862
+ ),
863
+ "aten::div": lambda self, node: self.add_pointwise_simple_binary_broadcast_op(
864
+ node, NNAPI_OperationCode.DIV, NNAPI_FuseCode.FUSED_NONE
865
+ ),
866
+ "aten::relu": lambda self, node: self.add_pointwise_simple_unary_op(
867
+ node, NNAPI_OperationCode.RELU
868
+ ),
869
+ "aten::sigmoid": lambda self, node: self.add_pointwise_simple_unary_op(
870
+ node, NNAPI_OperationCode.LOGISTIC
871
+ ),
872
+ "aten::softmax": lambda self, node: self.add_softmax(node),
873
+ "aten::hardtanh": lambda self, node: self.add_hardtanh(node),
874
+ "aten::avg_pool2d": lambda self, node: self.add_avg_pool2d(node),
875
+ "aten::max_pool2d": lambda self, node: self.add_pool2d_node(
876
+ node, NNAPI_OperationCode.MAX_POOL_2D
877
+ ),
878
+ "aten::adaptive_avg_pool2d": lambda self, node: self.add_adaptive_avg_pool2d(
879
+ node
880
+ ),
881
+ "aten::upsample_nearest2d": lambda self, node: self.add_upsample_nearest2d(
882
+ node
883
+ ),
884
+ "aten::prelu": lambda self, node: self.add_prelu_op(node),
885
+ "aten::addmm": lambda self, node: self.add_addmm(node),
886
+ "aten::linear": lambda self, node: self.add_linear(node),
887
+ "aten::_convolution": lambda self, node: self.add_conv_underscore(node),
888
+ "aten::conv2d": lambda self, node: self.add_conv2d(node),
889
+ "aten::log_softmax": lambda self, node: self.add_log_softmax(node),
890
+ "quantized::linear": lambda self, node: self.add_qlinear(node),
891
+ "quantized::conv2d": lambda self, node: self.add_qconv2d(
892
+ node, NNAPI_FuseCode.FUSED_NONE
893
+ ),
894
+ "quantized::conv2d_relu": lambda self, node: self.add_qconv2d(
895
+ node, NNAPI_FuseCode.FUSED_RELU
896
+ ),
897
+ "quantized::conv_transpose2d": lambda self, node: self.add_qconv2d(
898
+ node, NNAPI_FuseCode.FUSED_NONE, transpose=True
899
+ ),
900
+ "quantized::add": lambda self, node: self.add_qadd(
901
+ node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE
902
+ ),
903
+ "quantized::add_relu": lambda self, node: self.add_qadd(
904
+ node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_RELU
905
+ ),
906
+ "quantized::mul": lambda self, node: self.add_qadd(
907
+ node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE
908
+ ),
909
+ }
910
+
911
+ def add_node(self, node):
912
+ adder = self.ADDER_MAP.get(node.kind())
913
+ if not adder:
914
+ raise Exception( # noqa: TRY002
915
+ f"Unsupported node kind ({node.kind()!r}) in node {node!r}"
916
+ ) # noqa: TRY002
917
+ adder(self, node)
918
+
919
+ def _identity(self, node):
920
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
921
+ jitval = node.outputsAt(0)
922
+ self.jitval_operand_map[jitval] = in_id
923
+
924
+ def add_getattr(self, node):
925
+ assert node.inputsSize() == 1
926
+ assert node.outputsSize() == 1
927
+ obj_ctype, obj = self.get_constant_value(node.inputsAt(0))
928
+ assert str(obj_ctype).startswith("__torch__.")
929
+ name = node.s("name")
930
+ value = getattr(obj, name)
931
+ output = node.outputsAt(0)
932
+ ctype = output.type()
933
+ self.add_constant_value(output, ctype, value)
934
+
935
+ def add_constant_node(self, node):
936
+ assert node.inputsSize() == 0
937
+ assert node.outputsSize() == 1
938
+ output = node.outputsAt(0)
939
+ ctype = output.type()
940
+ value = output.toIValue()
941
+ self.add_constant_value(output, ctype, value)
942
+
943
+ def add_list_construct(self, node):
944
+ assert node.outputsSize() == 1
945
+ output = node.outputsAt(0)
946
+ ctype = output.type()
947
+ const_vals: Optional[List] = []
948
+ tensors: Optional[List] = []
949
+ for inp in node.inputs():
950
+ if const_vals is not None and inp in self.constants:
951
+ _, val = self.get_constant_value(inp)
952
+ const_vals.append(val)
953
+ else:
954
+ const_vals = None
955
+ if tensors is not None and inp.type().kind() == "TensorType":
956
+ tensors.append(inp)
957
+ else:
958
+ tensors = None
959
+
960
+ if const_vals is not None:
961
+ # NOTE: Now that TorchScript supports list constants,
962
+ # this code path might not be used anymore.
963
+ self.add_constant_value(output, ctype, const_vals)
964
+ if tensors is not None:
965
+ self.add_tensor_sequence(output, tensors)
966
+ if const_vals is None and tensors is None:
967
+ raise Exception( # noqa: TRY002
968
+ f"Unable to handle ListConstruct node. Neither all constants nor all tensors. {node!r}"
969
+ )
970
+
971
+ def add_tuple_construct(self, node):
972
+ assert node.outputsSize() == 1
973
+ output = node.outputsAt(0)
974
+ values = list(node.inputs())
975
+ self.add_tensor_sequence(output, values)
976
+
977
+ def add_unsqueeze(self, node):
978
+ assert node.inputsSize() == 2
979
+ assert node.outputsSize() == 1
980
+
981
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
982
+
983
+ _, dim = self.get_constant_value(node.inputsAt(1), "IntType")
984
+ assert in_oper.dim_order == DimOrder.PRESUMED_CONTIGUOUS
985
+
986
+ real_dim = dim if dim >= 0 else dim + len(in_oper.shape) + 1
987
+ out_shape_list = list(in_oper.shape)
988
+ out_shape_list.insert(real_dim, 1)
989
+ out_shape = tuple(out_shape_list)
990
+ out_oper = in_oper._replace(shape=out_shape)
991
+
992
+ inputs = [None] * 2
993
+ inputs[0] = in_id
994
+ inputs[1] = self.add_immediate_int_scalar(dim)
995
+
996
+ outputs = [None] * 1
997
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)
998
+
999
+ self.add_operation(NNAPI_OperationCode.EXPAND_DIMS, inputs, outputs)
1000
+
1001
+ def add_to(self, node):
1002
+ # Handle to("cpu") / to("gpu") case
1003
+ self._identity(node)
1004
+
1005
+ def add_reshape(self, node):
1006
+ assert node.inputsSize() == 2
1007
+ assert node.outputsSize() == 1
1008
+
1009
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1010
+
1011
+ shape_ctype, shape = self.get_constant_value(node.inputsAt(1))
1012
+ assert shape_ctype.kind() == "ListType"
1013
+ assert shape_ctype.getElementType().kind() == "IntType"
1014
+ is_trivial_reshape = len(shape) == 2 and shape[1] == -1
1015
+
1016
+ if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape:
1017
+ raise Exception( # noqa: TRY002
1018
+ "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1]."
1019
+ )
1020
+
1021
+ # Bit of a hack here. Use a real tensor to infer the output shape.
1022
+ out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape
1023
+ out_oper = in_oper._replace(
1024
+ shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS
1025
+ )
1026
+
1027
+ inputs = [None] * 2
1028
+ inputs[0] = in_id
1029
+ inputs[1] = self.add_immediate_int_vector(shape)
1030
+
1031
+ outputs = [None] * 1
1032
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)
1033
+
1034
+ self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs)
1035
+
1036
+ def add_flatten(self, node):
1037
+ assert node.inputsSize() == 3
1038
+ assert node.outputsSize() == 1
1039
+
1040
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1041
+
1042
+ start_ctype, start_dim = self.get_constant_value(node.inputsAt(1), "IntType")
1043
+ end_ctype, end_dim = self.get_constant_value(node.inputsAt(2), "IntType")
1044
+
1045
+ # channels last with channels == 1 or (height & width both 1)
1046
+ is_trivial_flatten = len(in_oper.shape) == 4 and (
1047
+ in_oper.shape[1] == 1 or (in_oper.shape[2] == 1 and in_oper.shape[3] == 1)
1048
+ )
1049
+ if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_flatten:
1050
+ raise Exception( # noqa: TRY002
1051
+ "Currently, flatten is not supported on NHWC tensors unless C=1 or H=W=1"
1052
+ )
1053
+
1054
+ if start_dim < 0:
1055
+ start_dim += len(in_oper.shape)
1056
+ if end_dim < 0:
1057
+ end_dim += len(in_oper.shape)
1058
+
1059
+ out_shape = (
1060
+ in_oper.shape[:start_dim]
1061
+ + (functools.reduce(operator.mul, in_oper.shape[start_dim : end_dim + 1]),)
1062
+ + in_oper.shape[end_dim + 1 :]
1063
+ )
1064
+
1065
+ if any(dim == 0 for dim in in_oper.shape[start_dim : end_dim + 1]):
1066
+ raise Exception( # noqa: TRY002
1067
+ "Flattening flexible dims is not supported yet"
1068
+ ) # noqa: TRY002
1069
+ non_flattened_dims = in_oper.shape[:start_dim] + in_oper.shape[end_dim + 1 :]
1070
+ if non_flattened_dims.count(0) > 1:
1071
+ raise Exception("Only 1 dim can be flexible") # noqa: TRY002
1072
+
1073
+ out_oper = in_oper._replace(
1074
+ shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS
1075
+ )
1076
+ out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
1077
+
1078
+ for idx, dim in enumerate(out_shape):
1079
+ if dim == 0:
1080
+ self.forward_operand_shape(out_id, idx, in_id, in_oper.shape.index(0))
1081
+
1082
+ inputs_1 = tuple(dim if dim != 0 else -1 for dim in out_shape)
1083
+ inputs = [None] * 2
1084
+ inputs[0] = in_id
1085
+ inputs[1] = self.add_immediate_int_vector(inputs_1)
1086
+
1087
+ outputs = [None] * 1
1088
+ outputs[0] = out_id
1089
+
1090
+ self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs)
1091
+
1092
+ def add_slice(self, node):
1093
+ assert node.inputsSize() == 5
1094
+ assert node.outputsSize() == 1
1095
+
1096
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1097
+ _, dim_value = self.get_constant_value(node.inputsAt(1))
1098
+ _, start_value = self.get_constant_value(node.inputsAt(2))
1099
+ _, stop_value = self.get_constant_value(node.inputsAt(3))
1100
+ _, step_value = self.get_constant_value(node.inputsAt(4))
1101
+
1102
+ if start_value is None:
1103
+ start_value = 0
1104
+ if stop_value is None:
1105
+ stop_value = sys.maxsize
1106
+
1107
+ if start_value < 0:
1108
+ start_value += in_oper.shape[dim_value]
1109
+ elif start_value == sys.maxsize:
1110
+ start_value = 0
1111
+
1112
+ if start_value == 0 and stop_value == sys.maxsize:
1113
+ self._identity(node)
1114
+ return
1115
+
1116
+ if in_oper.shape[dim_value] == 0:
1117
+ raise Exception("Unable to slice with flexible shape") # noqa: TRY002
1118
+
1119
+ if stop_value < 0:
1120
+ stop_value += in_oper.shape[dim_value]
1121
+ elif stop_value == sys.maxsize:
1122
+ stop_value = in_oper.shape[dim_value]
1123
+
1124
+ if start_value >= stop_value:
1125
+ raise Exception( # noqa: TRY002
1126
+ "Slice start value should be less than stop value"
1127
+ ) # noqa: TRY002
1128
+
1129
+ out_len = (stop_value - start_value) // step_value
1130
+ out_shape = tuple(
1131
+ out_len if i == dim_value else dim for i, dim in enumerate(in_oper.shape)
1132
+ )
1133
+ out_id = self.add_tensor_operand(
1134
+ node.outputsAt(0), in_oper._replace(shape=out_shape)
1135
+ )
1136
+
1137
+ # flex inputs
1138
+ end_mask = 0
1139
+ for idx, dim in enumerate(out_shape):
1140
+ if dim == 0:
1141
+ self.forward_operand_shape(out_id, idx, in_id, idx)
1142
+ end_mask |= 1 << idx
1143
+
1144
+ inputs = [None] * 7
1145
+ inputs[0] = in_id
1146
+ inputs[1] = self.add_immediate_int_vector(
1147
+ [start_value if i == dim_value else 0 for i in range(len(in_oper.shape))]
1148
+ )
1149
+ inputs[2] = self.add_immediate_int_vector(
1150
+ [
1151
+ stop_value if i == dim_value else dim
1152
+ for i, dim in enumerate(in_oper.shape)
1153
+ ]
1154
+ )
1155
+ inputs[3] = self.add_immediate_int_vector(
1156
+ [step_value if i == dim_value else 1 for i in range(len(in_oper.shape))]
1157
+ )
1158
+ inputs[4] = self.add_immediate_int_scalar(0) # begin mask
1159
+ inputs[5] = self.add_immediate_int_scalar(end_mask)
1160
+ inputs[6] = self.add_immediate_int_scalar(0) # shrink axis mas
1161
+
1162
+ outputs = [None] * 1
1163
+ outputs[0] = out_id
1164
+
1165
+ self.add_operation(NNAPI_OperationCode.STRIDED_SLICE, inputs, outputs)
1166
+
1167
+ def add_size(self, node):
1168
+ assert node.inputsSize() == 2
1169
+ assert node.outputsSize() == 1
1170
+
1171
+ _, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1172
+ _, value = self.constants[node.inputsAt(1)]
1173
+ res = in_oper.shape[value]
1174
+ output = node.outputsAt(0)
1175
+ self.add_constant_value(output, output.type(), res)
1176
+
1177
+ def add_cat(self, node):
1178
+ assert node.inputsSize() == 2
1179
+ assert node.outputsSize() == 1
1180
+
1181
+ tensors = self.tensor_sequences[node.inputsAt(0)]
1182
+ _, dim = self.get_constant_value(node.inputsAt(1), "IntType")
1183
+
1184
+ assert len(tensors) > 0
1185
+ in_ids = []
1186
+ out_oper = None
1187
+ out_dim_size = 0
1188
+ for inp in tensors:
1189
+ in_id, in_oper = self.get_tensor_operand_by_jitval(inp)
1190
+ if out_oper is None:
1191
+ out_shape = change_element(in_oper.shape, dim, -1)
1192
+ out_oper = in_oper._replace(shape=out_shape)
1193
+ assert in_oper.op_type == out_oper.op_type
1194
+ assert in_oper.dim_order == out_oper.dim_order
1195
+ assert change_element(in_oper.shape, dim, -1) == change_element(
1196
+ out_oper.shape, dim, -1
1197
+ )
1198
+ # TODO: Possibly check scale and zero point.
1199
+ in_ids.append(in_id)
1200
+ # TODO: Possibly support variable-sized inputs.
1201
+ out_dim_size += in_oper.shape[dim]
1202
+
1203
+ assert out_oper is not None
1204
+ out_oper = out_oper._replace(
1205
+ shape=change_element(out_oper.shape, dim, out_dim_size)
1206
+ )
1207
+
1208
+ if in_oper.dim_order == DimOrder.CHANNELS_LAST: # type: ignore[possibly-undefined]
1209
+ assert len(out_oper.shape) == 4
1210
+ nnapi_dim = [0, 3, 1, 2][dim]
1211
+ else:
1212
+ nnapi_dim = dim
1213
+
1214
+ out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
1215
+ for idx, d in enumerate(out_oper.shape):
1216
+ if d == 0:
1217
+ if idx == dim:
1218
+ shape = " + ".join(flex_name(ip_id, dim) for ip_id in in_ids)
1219
+ self.compute_operand_shape(out_id, idx, shape)
1220
+ else:
1221
+ self.forward_operand_shape(out_id, idx, in_ids[0], idx)
1222
+
1223
+ inputs = in_ids + [self.add_immediate_int_scalar(nnapi_dim)]
1224
+
1225
+ outputs = [None] * 1
1226
+ outputs[0] = out_id
1227
+
1228
+ self.add_operation(NNAPI_OperationCode.CONCATENATION, inputs, outputs)
1229
+
1230
+ def add_mean(self, node):
1231
+ assert node.inputsSize() == 4
1232
+ assert node.outputsSize() == 1
1233
+
1234
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1235
+ dim_ctype, dim = self.get_constant_value(node.inputsAt(1))
1236
+ assert dim_ctype.kind() == "ListType"
1237
+ assert dim_ctype.getElementType().kind() == "IntType"
1238
+ _, keep_dim = self.get_constant_value(node.inputsAt(2), "BoolType")
1239
+ # Expect None for dtype
1240
+ self.get_constant_value(node.inputsAt(3), "NoneType")
1241
+
1242
+ if in_oper.dim_order == DimOrder.CHANNELS_LAST:
1243
+ assert len(in_oper.shape) == 4
1244
+ nnapi_dim = [[0, 3, 1, 2][d] for d in dim]
1245
+ else:
1246
+ nnapi_dim = dim
1247
+
1248
+ collapsed_dims = set()
1249
+ for d in dim:
1250
+ if d < 0:
1251
+ d += len(in_oper.shape)
1252
+ collapsed_dims.add(d)
1253
+
1254
+ if in_oper.dim_order == DimOrder.CHANNELS_LAST and not keep_dim:
1255
+ assert collapsed_dims.issuperset({2, 3})
1256
+ out_dim_order = DimOrder.PRESUMED_CONTIGUOUS
1257
+ else:
1258
+ out_dim_order = in_oper.dim_order
1259
+
1260
+ out_shape = []
1261
+ for i, s in enumerate(in_oper.shape):
1262
+ if i not in collapsed_dims:
1263
+ out_shape.append(s)
1264
+ elif keep_dim:
1265
+ out_shape.append(1)
1266
+
1267
+ out_oper = in_oper._replace(shape=out_shape, dim_order=out_dim_order)
1268
+
1269
+ inputs = [None] * 3
1270
+ inputs[0] = in_id
1271
+ inputs[1] = self.add_immediate_int_vector(nnapi_dim)
1272
+ inputs[2] = self.add_immediate_int_scalar(keep_dim)
1273
+
1274
+ outputs = [None] * 1
1275
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)
1276
+
1277
+ self.add_operation(NNAPI_OperationCode.MEAN, inputs, outputs)
1278
+
1279
+ def add_quantize(self, node):
1280
+ assert node.inputsSize() == 4
1281
+ assert node.outputsSize() == 1
1282
+
1283
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1284
+ if in_oper.dim_order != DimOrder.CHANNELS_LAST:
1285
+ raise Exception( # noqa: TRY002
1286
+ "Most hardware backends prefer NHWC quantized tensors. "
1287
+ "Try setting `t.nnapi_nhwc = True` on your tensor inputs. "
1288
+ )
1289
+ _, scale = self.get_constant_value(node.inputsAt(1), "FloatType")
1290
+ _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType")
1291
+ _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType")
1292
+ if scalar_type != TorchScalarTypes.QUINT8.value:
1293
+ raise Exception( # noqa: TRY002
1294
+ "PyTorch NNAPI export only supports quantized tensors "
1295
+ "with the quint8 dtype."
1296
+ )
1297
+ op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM
1298
+
1299
+ out_oper = in_oper._replace(
1300
+ op_type=op_type,
1301
+ scale=scale,
1302
+ zero_point=zero_point,
1303
+ )
1304
+
1305
+ inputs = [None] * 1
1306
+ inputs[0] = in_id
1307
+
1308
+ outputs = [None] * 1
1309
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)
1310
+
1311
+ self.add_operation(NNAPI_OperationCode.QUANTIZE, inputs, outputs)
1312
+
1313
+ def add_dequantize(self, node):
1314
+ assert node.inputsSize() == 1
1315
+ assert node.outputsSize() == 1
1316
+
1317
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1318
+ out_oper = in_oper._replace(
1319
+ op_type=NNAPI_OperandCode.TENSOR_FLOAT32,
1320
+ scale=0.0,
1321
+ zero_point=0,
1322
+ )
1323
+
1324
+ inputs = [None] * 1
1325
+ inputs[0] = in_id
1326
+
1327
+ outputs = [None] * 1
1328
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)
1329
+
1330
+ self.add_operation(NNAPI_OperationCode.DEQUANTIZE, inputs, outputs)
1331
+
1332
+ def add_pointwise_simple_unary_op(self, node, opcode):
1333
+ assert node.inputsSize() == 1
1334
+ assert node.outputsSize() == 1
1335
+
1336
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1337
+
1338
+ out_oper = in_oper
1339
+ if opcode == NNAPI_OperationCode.LOGISTIC:
1340
+ # NNAPI docs: For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, the scale
1341
+ # must be 1.f / 256 and the zeroPoint must be 0.
1342
+ # https://fburl.com/h52stoog
1343
+ if in_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
1344
+ out_oper = in_oper._replace(zero_point=0, scale=1.0 / 256)
1345
+
1346
+ out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
1347
+
1348
+ for idx, dim in enumerate(in_oper.shape):
1349
+ if dim == 0:
1350
+ self.forward_operand_shape(out_id, idx, in_id, idx)
1351
+
1352
+ inputs = [None] * 1
1353
+ inputs[0] = in_id
1354
+
1355
+ outputs = [None] * 1
1356
+ outputs[0] = out_id
1357
+
1358
+ self.add_operation(opcode, inputs, outputs)
1359
+
1360
+ def _do_add_binary(self, node, opcode, fuse_code, *, qparams=None): # noqa: D401
1361
+ """Helper for pointwise binary broadcast ops with superfluous extra args."""
1362
+ assert node.outputsSize() == 1
1363
+
1364
+ assert node.inputsAt(0).type().kind() == "TensorType"
1365
+ assert node.inputsAt(1).type().kind() == "TensorType"
1366
+
1367
+ if self.has_operand_for_jitval(node.inputsAt(0)):
1368
+ in0_id, in0_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1369
+ in1_id, in1_oper = self.get_tensor_operand_or_constant(
1370
+ node.inputsAt(1), in0_oper.dim_order
1371
+ )
1372
+ elif self.has_operand_for_jitval(node.inputsAt(1)):
1373
+ in1_id, in1_oper = self.get_tensor_operand_by_jitval(node.inputsAt(1))
1374
+ in0_id, in0_oper = self.get_tensor_operand_or_constant(
1375
+ node.inputsAt(0), in1_oper.dim_order
1376
+ )
1377
+ else:
1378
+ raise Exception( # noqa: TRY002
1379
+ f"Can't do a NNAPI binary op: {opcode} on two constants"
1380
+ ) # noqa: TRY002
1381
+
1382
+ assert in0_oper.op_type == in1_oper.op_type
1383
+ in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast(
1384
+ in0_id, in0_oper, in1_id, in1_oper
1385
+ )
1386
+ # NOTE: PyTorch and NNAPI have the same broadcast semantics.
1387
+ out_shape = broadcast_shapes(in0_oper.shape, in1_oper.shape)
1388
+ out_oper = in0_oper._replace(shape=out_shape)
1389
+ if qparams is not None:
1390
+ scale, zp = qparams
1391
+ out_oper = out_oper._replace(scale=scale, zero_point=zp)
1392
+
1393
+ out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
1394
+ for idx, (d0, d1) in enumerate(zip(in0_oper.shape, in1_oper.shape)):
1395
+ if d0 == 1 and d1 == 0:
1396
+ self.forward_operand_shape(out_id, idx, in1_id, idx)
1397
+ elif d0 == 0 and d1 == 1:
1398
+ self.forward_operand_shape(out_id, idx, in0_id, idx)
1399
+ elif d0 == 0 and d1 == 0:
1400
+ self.flexible_shape_computation_lines.append(
1401
+ f"assert {flex_name(in0_id, idx)} == {flex_name(in1_id, idx)}"
1402
+ )
1403
+ self.forward_operand_shape(out_id, idx, in0_id, idx)
1404
+
1405
+ inputs = [None] * 3
1406
+ inputs[0] = in0_id
1407
+ inputs[1] = in1_id
1408
+ inputs[2] = self.add_immediate_int_scalar(fuse_code)
1409
+
1410
+ outputs = [None] * 1
1411
+ outputs[0] = out_id
1412
+
1413
+ self.add_operation(opcode, inputs, outputs)
1414
+
1415
+ def add_pointwise_simple_binary_broadcast_op(self, node, opcode, fuse_code):
1416
+ assert node.inputsSize() == 2
1417
+ self._do_add_binary(node, opcode, fuse_code)
1418
+
1419
+ def add_add_sub_op(self, node, opcode, fuse_code):
1420
+ assert node.inputsSize() == 3
1421
+
1422
+ _, alpha = self.get_constant_value(node.inputsAt(2), "IntType")
1423
+ if alpha != 1:
1424
+ raise Exception( # noqa: TRY002
1425
+ "NNAPI does not support add/sub with alpha."
1426
+ ) # noqa: TRY002
1427
+
1428
+ self._do_add_binary(node, opcode, fuse_code)
1429
+
1430
+ def add_qadd(self, node, opcode, fuse_code):
1431
+ assert node.inputsSize() == 4
1432
+
1433
+ _, scale = self.get_constant_value(node.inputsAt(2), "FloatType")
1434
+ _, zero_point = self.get_constant_value(node.inputsAt(3), "IntType")
1435
+
1436
+ self._do_add_binary(node, opcode, fuse_code, qparams=(scale, zero_point))
1437
+
1438
+ def add_softmax(self, node):
1439
+ assert node.inputsSize() == 3
1440
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1441
+
1442
+ _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType")
1443
+
1444
+ out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
1445
+ for dim, size in enumerate(in_oper.shape):
1446
+ if size == 0:
1447
+ self.forward_operand_shape(out_id, dim, in_id, dim)
1448
+
1449
+ inputs = [None] * 3
1450
+ inputs[0] = in_id
1451
+ inputs[1] = self.add_immediate_float_scalar(
1452
+ 1.0
1453
+ ) # positive scaling factor of exponent, beta
1454
+ inputs[2] = self.add_immediate_int_scalar(softmax_dim)
1455
+
1456
+ outputs = [None] * 1
1457
+ outputs[0] = out_id
1458
+
1459
+ self.add_operation(NNAPI_OperationCode.SOFTMAX, inputs, outputs)
1460
+
1461
+ def add_hardtanh(self, node):
1462
+ assert node.inputsSize() == 3
1463
+ assert node.outputsSize() == 1
1464
+
1465
+ in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
1466
+ _, min_val = self.get_constant_value(node.inputsAt(1), "FloatType")
1467
+ _, max_val = self.get_constant_value(node.inputsAt(2), "FloatType")
1468
+
1469
+ op_map = {
1470
+ (-1, 1): NNAPI_OperationCode.RELU1,
1471
+ (0, 6): NNAPI_OperationCode.RELU6, # noqa: E201
1472
+ }
1473
+
1474
+ opcode = op_map.get((min_val, max_val))
1475
+ if opcode is None:
1476
+ raise Exception( # noqa: TRY002
1477
+ "NNAPI only supports hardtanh with args (-1, 1) or (0, 6)."
1478
+ ) # noqa: TRY002
1479
+
1480
+ inputs = [None] * 1
1481
+ inputs[0] = in_id
1482
+
1483
+ outputs = [None] * 1
1484
+ outputs[0] = self.add_tensor_operand(node.outputsAt(0), in_oper)
1485
+
1486
+ self.add_operation(opcode, inputs, outputs)
1487
+
1488
+ def add_prelu_op(self, node):
1489
+ assert node.inputsSize() == 2
1490
+ assert node.outputsSize() == 1
1491
+
1492
+ assert node.inputsAt(0).type().kind() == "TensorType"
1493
+ assert node.inputsAt(1).type().kind() == "TensorType"
1494
+
1495
+ in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
1496
+ w_id, w_oper = self.get_tensor_operand_for_weight(node.inputsAt(1))
1497
+ assert len(w_oper.shape) == 1
1498
+ assert w_oper.shape[0] > 0
1499
+ if w_oper.shape[0] > 1:
1500
+ if in_oper.use_nchw():
1501
+ # TODO: Support this by adding trailing 1 dims.
1502
+ raise Exception( # noqa: TRY002
1503
+ "Per-channel PReLU only supports channels_last right now."
1504
+ )
1505
+
1506
+ out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
1507
+ for dim, size in enumerate(in_oper.shape):
1508
+ if size > 0:
1509
+ pass
1510
+ elif dim <= 1:
1511
+ raise Exception( # noqa: TRY002
1512
+ "PReLU requires fixed size for dim 0 and dim 1."
1513
+ ) # noqa: TRY002
1514
+ else:
1515
+ self.forward_operand_shape(out_id, dim, in_id, dim)
1516
+
1517
+ inputs = [None] * 2
1518
+ inputs[0] = in_id
1519
+ inputs[1] = w_id
1520
+
1521
+ outputs = [None] * 1
1522
+ outputs[0] = out_id
1523
+
1524
+ self.add_operation(NNAPI_OperationCode.PRELU, inputs, outputs)
1525
+
1526
+ def add_pool2d_node(self, node, opcode):
1527
+ assert node.inputsSize() == 6
1528
+ assert node.outputsSize() == 1
1529
+ image, kernel, stride, padding, dilation, ceil_mode = node.inputs()
1530
+
1531
+ stride = stride or kernel
1532
+
1533
+ # TODO: Validate ceil_mode semantics.
1534
+
1535
+ args = self.get_conv_pool_args_2d_from_jit(
1536
+ self.get_size_arg(kernel), stride, padding, dilation
1537
+ )
1538
+ if args.dilation_h != 1 or args.dilation_w != 1:
1539
+ raise Exception("NNAPI does not support dilated pooling.") # noqa: TRY002
1540
+
1541
+ image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image)
1542
+ assert len(image_oper.shape) == 4
1543
+
1544
+ out_shape = get_conv_pool_shape(
1545
+ image_oper.shape, args, image_oper.shape[1], False
1546
+ )
1547
+ use_nchw = image_oper.use_nchw()
1548
+
1549
+ inputs = [None] * 11
1550
+ inputs[0] = image_id
1551
+ inputs[1] = self.add_immediate_int_scalar(args.pad_l)
1552
+ inputs[2] = self.add_immediate_int_scalar(args.pad_r)
1553
+ inputs[3] = self.add_immediate_int_scalar(args.pad_t)
1554
+ inputs[4] = self.add_immediate_int_scalar(args.pad_b)
1555
+ inputs[5] = self.add_immediate_int_scalar(args.stride_w)
1556
+ inputs[6] = self.add_immediate_int_scalar(args.stride_h)
1557
+ inputs[7] = self.add_immediate_int_scalar(args.kernel_w)
1558
+ inputs[8] = self.add_immediate_int_scalar(args.kernel_h)
1559
+ inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
1560
+ inputs[10] = self.add_immediate_bool_scalar(use_nchw)
1561
+
1562
+ outputs = [None] * 1
1563
+ outputs[0] = self.add_tensor_operand(
1564
+ node.outputsAt(0), image_oper._replace(shape=out_shape)
1565
+ )
1566
+
1567
+ self.add_operation(opcode, inputs, outputs)
1568
+
1569
+ def add_avg_pool2d(self, node):
1570
+ assert node.inputsSize() == 7
1571
+ assert node.outputsSize() == 1
1572
+ (
1573
+ image,
1574
+ kernel,
1575
+ stride,
1576
+ padding,
1577
+ ceil_mode,
1578
+ count_include_pad,
1579
+ divisor_override,
1580
+ ) = node.inputs()
1581
+
1582
+ _, count_include_pad_value = self.get_constant_value(count_include_pad)
1583
+ _, divisor_override_value = self.get_constant_value(divisor_override)
1584
+ if not count_include_pad_value or divisor_override_value:
1585
+ raise Exception( # noqa: TRY002
1586
+ "NNAPI doesn't support count_include_pad=False or divisor_override"
1587
+ )
1588
+
1589
+ args = self.get_conv_pool_args_2d_from_jit(
1590
+ self.get_size_arg(kernel), stride, padding
1591
+ )
1592
+
1593
+ image_id, image_oper = self.get_tensor_operand_by_jitval(image)
1594
+ assert len(image_oper.shape) == 4
1595
+
1596
+ out_shape = get_conv_pool_shape(
1597
+ image_oper.shape, args, image_oper.shape[1], False
1598
+ )
1599
+ use_nchw = image_oper.use_nchw()
1600
+
1601
+ inputs = [None] * 11
1602
+ inputs[0] = image_id
1603
+ inputs[1] = self.add_immediate_int_scalar(args.pad_l)
1604
+ inputs[2] = self.add_immediate_int_scalar(args.pad_r)
1605
+ inputs[3] = self.add_immediate_int_scalar(args.pad_t)
1606
+ inputs[4] = self.add_immediate_int_scalar(args.pad_b)
1607
+ inputs[5] = self.add_immediate_int_scalar(args.stride_w)
1608
+ inputs[6] = self.add_immediate_int_scalar(args.stride_h)
1609
+ inputs[7] = self.add_immediate_int_scalar(args.kernel_w)
1610
+ inputs[8] = self.add_immediate_int_scalar(args.kernel_h)
1611
+ inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
1612
+ inputs[10] = self.add_immediate_bool_scalar(use_nchw)
1613
+
1614
+ outputs = [None] * 1
1615
+ out_id = self.add_tensor_operand(
1616
+ node.outputsAt(0), image_oper._replace(shape=out_shape)
1617
+ )
1618
+ self._handle_conv_pool_flexible_input(out_id, image, args, False)
1619
+ outputs[0] = out_id
1620
+
1621
+ self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs)
1622
+
1623
+ def add_adaptive_avg_pool2d(self, node):
1624
+ assert node.inputsSize() == 2
1625
+ assert node.outputsSize() == 1
1626
+
1627
+ image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(
1628
+ node.inputsAt(0)
1629
+ )
1630
+ assert len(image_oper.shape) == 4
1631
+
1632
+ size_ctype, size_arg = self.get_constant_value(node.inputsAt(1))
1633
+ assert size_ctype.kind() == "ListType"
1634
+ assert size_ctype.getElementType().kind() == "IntType"
1635
+ if size_arg != [1, 1]:
1636
+ raise Exception( # noqa: TRY002
1637
+ "NNAPI only supports adaptive_avg_pool2d with output size (1, 1)."
1638
+ )
1639
+
1640
+ out_shape = image_oper.shape[0:2] + tuple(size_arg)
1641
+ use_nchw = image_oper.use_nchw()
1642
+
1643
+ inputs = [None] * 11
1644
+ inputs[0] = image_id
1645
+ inputs[1] = self.add_immediate_int_scalar(0)
1646
+ inputs[2] = self.add_immediate_int_scalar(0)
1647
+ inputs[3] = self.add_immediate_int_scalar(0)
1648
+ inputs[4] = self.add_immediate_int_scalar(0)
1649
+ inputs[5] = self.add_immediate_int_scalar(1)
1650
+ inputs[6] = self.add_immediate_int_scalar(1)
1651
+ inputs[7] = self.add_immediate_int_scalar(image_oper.shape[3])
1652
+ inputs[8] = self.add_immediate_int_scalar(image_oper.shape[2])
1653
+ inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
1654
+ inputs[10] = self.add_immediate_bool_scalar(use_nchw)
1655
+
1656
+ outputs = [None] * 1
1657
+ outputs[0] = self.add_tensor_operand(
1658
+ node.outputsAt(0), image_oper._replace(shape=out_shape)
1659
+ )
1660
+
1661
+ self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs)
1662
+
1663
    def add_upsample_nearest2d(self, node):
        """Lower aten::upsample_nearest2d to NNAPI RESIZE_NEAREST_NEIGHBOR.

        Handles both the 3-input overload (image, size, scale list) and the
        4-input overload (image, size, scale_h, scale_w); exactly one of
        size/scale may be non-None.  Flexible (zero) H/W dims are forwarded
        via compute_operand_shape.
        """
        assert node.inputsSize() == 3 or node.inputsSize() == 4
        assert node.outputsSize() == 1
        if node.inputsSize() == 3:
            image, size_jit, scale_jit = node.inputs()
        else:
            image, size_jit, scale_h_jit, scale_w_jit = node.inputs()
        size_ctype, size_arg = self.get_constant_value(size_jit)

        if node.inputsSize() == 3:
            scale_ctype, scale_arg = self.get_constant_value(scale_jit)  # type: ignore[possibly-undefined]
        else:
            scale_h_ctype, scale_h_arg = self.get_constant_value(scale_h_jit)  # type: ignore[possibly-undefined]
            scale_w_ctype, scale_w_arg = self.get_constant_value(scale_w_jit)  # type: ignore[possibly-undefined]

            # The only way for the 4-argument overload of upsample_nearest2d to
            # have been added to the graph without error is if the scale_h and
            # scale_w arguments are None
            assert scale_h_ctype.kind() == "NoneType"
            assert scale_w_ctype.kind() == "NoneType"

            # Collapse to the 3-input naming so the logic below is shared.
            scale_ctype = scale_h_ctype
            scale_arg = scale_h_arg

        image_id, image_oper = self.get_tensor_operand_by_jitval(image)
        assert len(image_oper.shape) == 4

        if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType":
            raise Exception("Size and scale cannot both be non-None.")  # noqa: TRY002
        elif size_ctype.kind() != "NoneType":
            # Explicit output size: a 1- or 2-element int list.
            assert size_ctype.kind() == "ListType"
            assert size_ctype.getElementType().kind() == "IntType"
            assert scale_ctype.kind() == "NoneType"
            assert scale_arg is None
            assert isinstance(size_arg, list)
            assert size_arg
            assert all(isinstance(val, int) for val in size_arg)
            if len(size_arg) == 1:
                size_arg = size_arg * 2
            assert len(size_arg) == 2
            out_h = size_arg[0]
            out_w = size_arg[1]
            arg_h = self.add_immediate_int_scalar(out_h)
            arg_w = self.add_immediate_int_scalar(out_w)
        elif scale_ctype.kind() != "NoneType":
            # Scale factors: a 1- or 2-element float list.
            assert scale_ctype.kind() == "ListType"
            assert scale_ctype.getElementType().kind() == "FloatType"
            assert size_ctype.kind() == "NoneType"
            assert size_arg is None
            assert isinstance(scale_arg, list)
            assert scale_arg
            assert all(isinstance(val, float) for val in scale_arg)
            if len(scale_arg) == 1:
                scale_arg = scale_arg * 2
            assert len(scale_arg) == 2
            out_h = int(scale_arg[0] * image_oper.shape[2])
            out_w = int(scale_arg[1] * image_oper.shape[3])
            arg_h = self.add_immediate_float_scalar(scale_arg[0])
            arg_w = self.add_immediate_float_scalar(scale_arg[1])
        else:
            raise Exception("Size and scale cannot both be None.")  # noqa: TRY002

        out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w)
        use_nchw = image_oper.use_nchw()
        out_id = self.add_tensor_operand(
            node.outputsAt(0), image_oper._replace(shape=out_shape)
        )

        if image_oper.shape[0] == 0 or image_oper.shape[1] == 0:
            raise Exception("Flexible batch or channels not supported")  # noqa: TRY002

        # Handle variable input size
        for dim in (2, 3):  # h, w indices
            if image_oper.shape[dim] == 0:
                if size_ctype.kind() != "NoneType":
                    self.compute_operand_shape(out_id, dim, size_arg[dim - 2])
                elif scale_ctype.kind() != "NoneType":
                    self.compute_operand_shape(
                        out_id,
                        dim,
                        f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})",
                    )
                else:
                    raise Exception(  # noqa: TRY002
                        "Size and scale cannot both be None."
                    )  # noqa: TRY002

        # RESIZE_NEAREST_NEIGHBOR takes width before height.
        inputs = [None] * 4
        inputs[0] = image_id
        inputs[1] = arg_w
        inputs[2] = arg_h
        inputs[3] = self.add_immediate_bool_scalar(use_nchw)

        outputs = [None] * 1
        outputs[0] = out_id

        self.add_operation(NNAPI_OperationCode.RESIZE_NEAREST_NEIGHBOR, inputs, outputs)
1760
+
1761
    def add_addmm(self, node):
        """Lower aten::addmm (bias + input @ weight) to an NNAPI
        fully-connected layer.

        NNAPI FULLY_CONNECTED has no alpha/beta scaling, so both must be
        constant 1.
        """
        assert node.inputsSize() == 5
        assert node.outputsSize() == 1
        jit_bias, jit_input, jit_weight, jit_beta, jit_alpha = node.inputs()

        for jitval in (jit_beta, jit_alpha):
            scale_ctype, scale_value = self.get_constant_value(jitval)
            assert scale_ctype.kind() in ("IntType", "FloatType")
            if scale_value != 1:
                raise Exception(  # noqa: TRY002
                    "NNAPI Fully-Connected does not support alpha and beta."
                )

        # addmm's weight is (in, out), so it must be transposed for NNAPI.
        self.add_addmm_or_linear(node, True, jit_input, jit_weight, jit_bias)
1775
+
1776
    def add_linear(self, node):
        """Lower aten::linear to an NNAPI fully-connected layer.

        linear's weight is already (out, in), so no transpose is needed.
        """
        assert node.inputsSize() == 3
        assert node.outputsSize() == 1
        jit_input, jit_weight, jit_bias = node.inputs()

        self.add_addmm_or_linear(node, False, jit_input, jit_weight, jit_bias)
1782
+
1783
    def add_addmm_or_linear(
        self, node, transpose_weight, jit_input, jit_weight, jit_bias
    ):
        """Shared lowering for addmm/linear to NNAPI FULLY_CONNECTED.

        transpose_weight: True when the JIT weight is laid out (in, out)
        (addmm) and must be transposed to NNAPI's (out, in) layout.
        Input must be 2-D; a flexible (zero) batch dim is forwarded to the
        output operand.
        """
        input_id, input_oper = self.get_tensor_operand_by_jitval(jit_input)
        bias_id, bias_oper = self.get_tensor_operand_for_weight(jit_bias)

        assert len(input_oper.shape) == 2
        assert len(bias_oper.shape) == 1

        # TODO: Transform at load time to share weights with CPU model.
        _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
        assert len(weight_tensor.shape) == 2
        if transpose_weight:
            nnapi_weight_tensor = weight_tensor.t().contiguous()
        else:
            nnapi_weight_tensor = weight_tensor.contiguous()
        weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
        weight_oper = self.operands[weight_id]

        # Output is (batch, out_features) = (input rows, weight rows).
        out_shape = (input_oper.shape[0], weight_oper.shape[0])
        out_id = self.add_tensor_operand(
            node.outputsAt(0), input_oper._replace(shape=out_shape)
        )

        # Flexible batch dimension: output batch tracks input batch.
        if input_oper.shape[0] == 0:
            self.forward_operand_shape(out_id, 0, input_id, 0)

        inputs = [None] * 4
        inputs[0] = input_id
        inputs[1] = weight_id
        inputs[2] = bias_id
        inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)

        outputs = [None] * 1
        outputs[0] = out_id

        self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs)
1820
+
1821
    def add_qlinear(self, node):
        """Lower quantized::linear to NNAPI FULLY_CONNECTED.

        Unpacks the LinearPackedParamsBase weight, converts qint8 weights to
        the quint8 representation NNAPI expects (shifting values and zero
        point by 128), requantizes the bias to qint32 with
        input_scale * weight_scale, and rejects requantization multipliers
        >= 1 (poorly supported by hardware backends).
        """
        assert node.inputsSize() == 4
        assert node.outputsSize() == 1
        (
            jit_input,
            jit_packed_weight,
            jit_scale,
            jit_zero_point,
        ) = node.inputs()

        input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input)
        # TODO: Support automatic reshape
        assert len(input_oper.shape) == 2

        _, out_scale = self.get_constant_value(jit_scale, "FloatType")
        _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType")
        weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight)
        assert weight_ctype.name() == "LinearPackedParamsBase"
        raw_weight, raw_bias = packed_weight.__getstate__()[0]
        assert raw_bias is not None

        assert len(raw_weight.shape) == 2
        assert len(raw_bias.shape) == 1
        assert raw_bias.shape[0] == raw_weight.shape[0]
        assert raw_weight.shape[1] == input_oper.shape[1]

        assert raw_weight.qscheme() == torch.per_tensor_affine
        if raw_weight.dtype == torch.quint8:
            unsigned_weight = raw_weight
        else:
            # Re-express qint8 weights as quint8: same real values, with the
            # integer representation and zero point shifted up by 128.
            assert raw_weight.dtype == torch.qint8
            unsigned_weight = torch._make_per_tensor_quantized_tensor(
                (raw_weight.int_repr().int() + 128).to(torch.uint8),
                scale=raw_weight.q_scale(),
                zero_point=raw_weight.q_zero_point() + 128,
            )
        weight_scale = unsigned_weight.q_scale()
        # NNAPI requires bias_scale == input_scale * weight_scale, zero point 0.
        bias_scale = input_oper.scale * weight_scale
        int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
        bias_id = self.add_tensor_operand_for_weight(int_bias)

        multiplier = input_oper.scale * weight_scale / out_scale
        assert multiplier > 0
        # NOTE(review): the check is `>= 1` but the message says "greater
        # than 1" — confirm the intended boundary wording.
        if multiplier >= 1:
            raise Exception(  # noqa: TRY002
                "Quantized convolution multiplier is greater than 1. "
                "This is supported by NNAPI, but not by most hardware backends. "
                "Try training a model without quantization-aware training. "
            )

        # TODO: Transform at load time to share weights with CPU model.
        nnapi_weight_tensor = unsigned_weight.contiguous()
        weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
        weight_oper = self.operands[weight_id]

        out_shape = (input_oper.shape[0], weight_oper.shape[0])
        out_oper = input_oper._replace(
            shape=out_shape,
            scale=out_scale,
            zero_point=out_zero_point,
        )

        inputs = [None] * 4
        inputs[0] = input_id
        inputs[1] = weight_id
        inputs[2] = bias_id
        inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)

        outputs = [None] * 1
        outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)

        self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs)
1893
+
1894
    def get_optional_bias(self, jit_bias, weight_tensor, transpose=False):
        """Return (bias_id, bias_oper) for a conv/linear bias jitval.

        If the bias is None, synthesize a zero bias sized to the output
        channels of weight_tensor: dim 1 for transposed conv weights
        (in, out, kH, kW), dim 0 otherwise (out, in, kH, kW).
        """
        ctype, value = self.get_constant_value(jit_bias)
        if ctype.kind() == "NoneType":
            bias_idx = 1 if transpose else 0
            nnapi_bias_tensor = torch.zeros(
                weight_tensor.size()[bias_idx], dtype=weight_tensor.dtype
            )
            bias_id = self.add_tensor_operand_for_weight(nnapi_bias_tensor)
            bias_oper = self.operands[bias_id]
            return bias_id, bias_oper
        else:
            return self.get_tensor_operand_for_weight(jit_bias)
1906
+
1907
    def add_conv2d(self, node):
        """Lower aten::conv2d (float) via the shared conv2d path.

        Uses scale 0.0 / zero point 0 since the operands are float, no
        transpose, and no fused activation.
        """
        assert node.inputsSize() == 7
        assert node.outputsSize() == 1

        (
            jit_image,
            jit_weight,
            jit_bias,
            jit_stride,
            jit_pad,
            jit_dilation,
            jit_groups,
        ) = node.inputs()

        _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
        bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor)
        # Kernel spatial size comes from weight dims 2-3 (kH, kW).
        args = self.get_conv_pool_args_2d_from_jit(
            weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups
        )

        return self.add_conv2d_common(
            node.outputsAt(0),
            0.0,
            0,
            jit_image,
            weight_tensor,
            bias_id,
            args,
            False,  # transpose
            NNAPI_FuseCode.FUSED_NONE,
        )
1938
+
1939
    def add_conv_underscore(self, node):
        """Lower aten::_convolution via the shared conv2d path.

        Only the image/weight/bias/stride/pad/dilation/transposed/groups
        arguments are used; the remaining flags (benchmark, deterministic,
        etc.) are ignored.
        """
        assert node.inputsSize() == 13
        assert node.outputsSize() == 1

        (
            jit_image,
            jit_weight,
            jit_bias,
            jit_stride,
            jit_pad,
            jit_dilation,
            jit_transpose,
            _,
            jit_groups,
            _,
            _,
            _,
            _,
        ) = node.inputs()

        _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
        _, transpose = self.get_constant_value(jit_transpose)
        # Transposed conv weights are (in, out, kH, kW), so the synthesized
        # zero bias must be sized from dim 1.
        bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor, transpose)
        args = self.get_conv_pool_args_2d_from_jit(
            weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups
        )

        return self.add_conv2d_common(
            node.outputsAt(0),
            0.0,
            0,
            jit_image,
            weight_tensor,
            bias_id,
            args,
            transpose,
            NNAPI_FuseCode.FUSED_NONE,
        )
1977
+
1978
    def add_log_softmax(self, node):
        """Lower aten::log_softmax to NNAPI LOG_SOFTMAX.

        The half_to_float argument is ignored; beta is fixed to 1.
        """
        assert node.inputsSize() == 3
        assert node.outputsSize() == 1

        (jit_input, jit_dim, jit_half_to_float) = node.inputs()
        input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input)
        _, dim = self.get_constant_value(jit_dim, "IntType")

        # Output has the same shape (and quantization params) as the input.
        out_shape = input_oper.shape

        inputs = [None] * 3
        inputs[0] = input_id
        # specifying 1 as the scaling factor for the exponent, beta
        inputs[1] = self.add_immediate_float_scalar(1)
        inputs[2] = self.add_immediate_int_scalar(dim)

        outputs = [None] * 1
        outputs[0] = self.add_tensor_operand(
            node.outputsAt(0), input_oper._replace(shape=out_shape)
        )
        self.add_operation(NNAPI_OperationCode.LOG_SOFTMAX, inputs, outputs)
1999
+
2000
    def add_qconv2d(self, node, fuse_code, transpose=False):
        """Lower quantized::conv2d (optionally transposed / with fused relu)
        via the shared conv2d path.

        Unpacks the Conv2dPackedParamsBase (version "2" format), converts
        qint8 weights to the quint8 representation NNAPI expects, and
        requantizes the bias to qint32 with input_scale * weight_scale.
        """
        assert node.inputsSize() == 4
        assert node.outputsSize() == 1

        (
            jit_image,
            jit_packed_weight,
            jit_scale,
            jit_zero_point,
        ) = node.inputs()

        _, out_scale = self.get_constant_value(jit_scale, "FloatType")
        _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType")
        weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight)
        assert weight_ctype.name() == "Conv2dPackedParamsBase"
        (
            pack_version,
            tensors,
            opt_tensors,
        ) = packed_weight.__getstate__()[0]
        assert pack_version == "2"
        packed_config, raw_weight = tensors
        (raw_bias,) = opt_tensors
        assert raw_bias is not None
        # Stride/pad/dilation/groups are recovered from the packed config.
        args = self.get_conv_pool_args_2d_from_pack(
            raw_weight.shape[2:4], packed_config
        )

        assert raw_weight.qscheme() == torch.per_tensor_affine
        if raw_weight.dtype == torch.quint8:
            unsigned_weight = raw_weight
        else:
            # Re-express qint8 weights as quint8: same real values, with the
            # integer representation and zero point shifted up by 128.
            assert raw_weight.dtype == torch.qint8
            unsigned_weight = torch._make_per_tensor_quantized_tensor(
                (raw_weight.int_repr().int() + 128).to(torch.uint8),
                scale=raw_weight.q_scale(),
                zero_point=raw_weight.q_zero_point() + 128,
            )
        weight_scale = unsigned_weight.q_scale()
        _, image_oper = self.get_tensor_operand_by_jitval(jit_image)
        # NNAPI requires bias_scale == input_scale * weight_scale, zero point 0.
        bias_scale = image_oper.scale * weight_scale
        int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
        bias_id = self.add_tensor_operand_for_weight(int_bias)

        multiplier = image_oper.scale * weight_scale / out_scale
        assert multiplier > 0
        # NOTE(review): the check is `>= 1` but the message says "greater
        # than 1" — confirm the intended boundary wording.
        if multiplier >= 1:
            raise Exception(  # noqa: TRY002
                "Quantized convolution multiplier is greater than 1. "
                "This is supported by NNAPI, but not by most hardware backends. "
                "Try training a model without quantization-aware training. "
            )

        return self.add_conv2d_common(
            node.outputsAt(0),
            out_scale,
            out_zero_point,
            jit_image,
            unsigned_weight,
            bias_id,
            args,
            transpose,
            fuse_code,
        )
2064
+
2065
    def add_conv2d_common(
        self,
        jit_out,
        out_scale,
        out_zero_point,
        jit_image,
        weight_tensor,
        bias_id,
        args,
        transpose,
        fuse_code,
    ):
        """Emit an NNAPI CONV_2D / DEPTHWISE_CONV_2D / TRANSPOSE_CONV_2D op.

        Shared by the float and quantized conv lowerings.  Permutes the
        weight to NNAPI's channels-last layouts, validates operand types and
        quantization invariants, and wires up flexible-shape handling.

        Only group == 1 (full conv) or group == in_channels with channel
        multiplier 1 (depthwise) are supported.
        """
        image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image)
        in_c = image_oper.shape[1]

        if args.group == 1:
            # Full convolution
            depthwise = False
            if transpose:
                # (in, out, kH, kW) -> (out, kH, kW, in) via (1, 2, 3, 0)
                weight_permutation = (1, 2, 3, 0)
            else:
                # (out, in, kH, kW) -> (out, kH, kW, in)
                weight_permutation = (0, 2, 3, 1)
        elif args.group == in_c:
            # Depthwise convolution
            depthwise = True
            # (out, 1, kH, kW) -> (1, kH, kW, out)
            weight_permutation = (1, 2, 3, 0)
        else:
            raise Exception("Group convolution not supported yet.")  # noqa: TRY002

        # TODO: Transform at load time to share weights with CPU model.
        nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous()
        weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
        weight_oper = self.operands[weight_id]

        bias_oper = self.operands[bias_id]

        # Validate operand-type combinations NNAPI accepts.
        if image_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:
            assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32
            assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32
        elif image_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
            assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM
            assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_INT32
            assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale)
            assert bias_oper.zero_point == 0
        else:
            raise Exception(  # noqa: TRY002
                f"Unsupported input type for conv2d: {image_oper.op_type}"
            )  # noqa: TRY002

        assert len(image_oper.shape) == 4
        assert len(weight_oper.shape) == 4
        assert len(bias_oper.shape) == 1

        if depthwise:
            # Depthwise convolution
            one, kern_h, kern_w, out_c = weight_oper.shape
            assert one == 1
            assert out_c % in_c == 0
            channel_multiplier = out_c // in_c
            assert channel_multiplier == 1  # Don't support multiplier
            assert out_c == in_c
        else:
            # Full convolution
            out_c, kern_h, kern_w, kern_d = weight_oper.shape
            assert kern_d == in_c

        assert out_c == bias_oper.shape[0]

        use_nchw = image_oper.use_nchw()

        if depthwise:
            # DEPTHWISE_CONV_2D takes an extra channel-multiplier input.
            num_args = 12
            opcode = NNAPI_OperationCode.DEPTHWISE_CONV_2D
        else:
            num_args = 11
            if transpose:
                opcode = NNAPI_OperationCode.TRANSPOSE_CONV_2D
            else:
                opcode = NNAPI_OperationCode.CONV_2D

        # Explicit-padding signature: pad l/r/t/b, stride w/h, then
        # (depthwise: multiplier,) fused activation, layout flag.
        inputs = [None] * num_args
        inputs[0] = image_id
        inputs[1] = weight_id
        inputs[2] = bias_id
        inputs[3] = self.add_immediate_int_scalar(args.pad_l)
        inputs[4] = self.add_immediate_int_scalar(args.pad_r)
        inputs[5] = self.add_immediate_int_scalar(args.pad_t)
        inputs[6] = self.add_immediate_int_scalar(args.pad_b)
        inputs[7] = self.add_immediate_int_scalar(args.stride_w)
        inputs[8] = self.add_immediate_int_scalar(args.stride_h)
        if depthwise:
            inputs[9] = self.add_immediate_int_scalar(1)
            inputs[10] = self.add_immediate_int_scalar(fuse_code)
            inputs[11] = self.add_immediate_bool_scalar(use_nchw)
        else:
            inputs[9] = self.add_immediate_int_scalar(fuse_code)
            inputs[10] = self.add_immediate_bool_scalar(use_nchw)

        outputs = [None] * 1
        out_shape = get_conv_pool_shape(image_oper.shape, args, out_c, transpose)
        out_oper = image_oper._replace(
            shape=out_shape,
            scale=out_scale,
            zero_point=out_zero_point,
        )
        out_id = self.add_tensor_operand(jit_out, out_oper)
        self._handle_conv_pool_flexible_input(out_id, jit_image, args, transpose)

        outputs[0] = out_id
        self.add_operation(opcode, inputs, outputs)
2175
+
2176
    def _handle_conv_pool_flexible_input(self, out_id, jit_image, args, transpose):
        """Propagate flexible (zero) input dims to the conv/pool output.

        Batch is forwarded directly; flexible channels are rejected; H/W are
        computed from the standard (transposed-)convolution output-size
        formulas as runtime shape expressions.
        """
        image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image)
        batch, in_ch, in_h, in_w = image_oper.shape

        if batch == 0:
            self.forward_operand_shape(out_id, 0, image_id, 0)
        if in_ch == 0:
            raise Exception("Input channels can't be flexible")  # noqa: TRY002
        # H & W
        if transpose:
            # Transposed conv: out = (in - 1) * stride + kernel - pad_total
            if in_h == 0:
                self.compute_operand_shape(
                    out_id,
                    2,
                    f"({flex_name(image_id, 2)} - 1) * {args.stride_h} + {args.kernel_h} - {args.pad_t} - {args.pad_b}",
                )
            if in_w == 0:
                self.compute_operand_shape(
                    out_id,
                    3,
                    f"({flex_name(image_id, 3)} - 1) * {args.stride_w} + {args.kernel_w} - {args.pad_l} - {args.pad_r}",
                )
        else:
            # Forward conv: out = (in - kernel + pad_total) // stride + 1
            if in_h == 0:
                self.compute_operand_shape(
                    out_id,
                    2,
                    f"({flex_name(image_id, 2)} - {args.kernel_h} + {args.pad_t} + {args.pad_b}) // {args.stride_h} + 1",
                )
            if in_w == 0:
                self.compute_operand_shape(
                    out_id,
                    3,
                    f"({flex_name(image_id, 3)} - {args.kernel_w} + {args.pad_l} + {args.pad_r}) // {args.stride_w} + 1",
                )
2211
+
2212
+
2213
def serialize_model(
    module, inputs, *, config=None, return_shapes=None, use_int16_for_qint16=False
):
    """Convert to NNAPI and serialize torchscript module.

    Parameters:
        module: Torchscript module to convert
        inputs: Tensors used to specify input details for NNAPI
        config (optional): Optional config to attach to module
        return_shapes (optional): Specify shape of outputs if
            your module uses runtime flexible shapes to set output
            buffer size for NNAPI
        use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values

    Returns:
        Whatever _NnapiSerializer.serialize_model produces for the module.
    """
    return _NnapiSerializer(config, use_int16_for_qint16).serialize_model(
        module, inputs, return_shapes
    )
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cpu/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
# Public API of torch.backends.cpu.
__all__ = [
    "get_cpu_capability",
]
6
+
7
+
8
+ def get_cpu_capability() -> str:
9
+ r"""Return cpu capability as a string value.
10
+
11
+ Possible values:
12
+ - "DEFAULT"
13
+ - "VSX"
14
+ - "Z VECTOR"
15
+ - "NO AVX"
16
+ - "AVX2"
17
+ - "AVX512"
18
+ """
19
+ return torch._C._get_cpu_capability()
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cpu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (534 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cuda/__init__.py ADDED
@@ -0,0 +1,422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import contextlib
3
+
4
+ from typing import Union
5
+ from typing_extensions import deprecated
6
+
7
+ import torch
8
+
9
# Public API of torch.backends.cuda.
__all__ = [
    "is_built",
    "cuFFTPlanCacheAttrContextProp",
    "cuFFTPlanCache",
    "cuFFTPlanCacheManager",
    "cuBLASModule",
    "preferred_linalg_library",
    "preferred_blas_library",
    "cufft_plan_cache",
    "matmul",
    "SDPAParams",
    "enable_cudnn_sdp",
    "cudnn_sdp_enabled",
    "enable_flash_sdp",
    "flash_sdp_enabled",
    "enable_mem_efficient_sdp",
    "mem_efficient_sdp_enabled",
    "math_sdp_enabled",
    "enable_math_sdp",
    "can_use_flash_attention",
    "can_use_efficient_attention",
    "sdp_kernel",
]
32
+
33
+
34
+ def is_built():
35
+ r"""
36
+ Return whether PyTorch is built with CUDA support.
37
+
38
+ Note that this doesn't necessarily mean CUDA is available; just that if this PyTorch
39
+ binary were run on a machine with working CUDA drivers and devices, we would be able to use it.
40
+ """
41
+ return torch._C._has_cuda
42
+
43
+
44
class cuFFTPlanCacheAttrContextProp:
    """Descriptor like a regular ContextProp, but keyed by the owner's
    ``device_index``.

    The stored getter and setter receive ``obj.device_index`` as their
    first argument.  Passing a string as the setter marks the attribute
    read-only: assigning to it raises ``RuntimeError`` with that string
    as the message.
    """

    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, obj, objtype):
        return self.getter(obj.device_index)

    def __set__(self, obj, val):
        setter = self.setter
        # A string setter means "read-only"; surface it as the error text.
        if isinstance(setter, str):
            raise RuntimeError(setter)
        setter(obj.device_index, val)
58
+
59
+
60
class cuFFTPlanCache:
    r"""
    Represent a specific plan cache for a specific `device_index`.

    The attributes `size` and `max_size`, and method `clear`, can fetch and/ or
    change properties of the C++ cuFFT plan cache.
    """

    def __init__(self, device_index):
        # Index of the CUDA device whose cuFFT plan cache this object fronts.
        self.device_index = device_index

    # Read-only: the setter is a string, which the descriptor raises as a
    # RuntimeError on assignment.
    size = cuFFTPlanCacheAttrContextProp(
        torch._cufft_get_plan_cache_size,
        ".size is a read-only property showing the number of plans currently in the "
        "cache. To change the cache capacity, set cufft_plan_cache.max_size.",
    )

    # Read/write capacity of the underlying C++ cache.
    max_size = cuFFTPlanCacheAttrContextProp(
        torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size
    )

    def clear(self):
        """Empty this device's cuFFT plan cache."""
        return torch._cufft_clear_plan_cache(self.device_index)
83
+
84
+
85
class cuFFTPlanCacheManager:
    r"""
    Represent all cuFFT plan caches, return the cuFFTPlanCache for a given device when indexed.

    Finally, this object, when used directly as a `cuFFTPlanCache` object (e.g.,
    setting the `.max_size`) attribute, the current device's cuFFT plan cache is
    used.
    """

    # Name-mangled class default: while False (i.e. before __init__ has run),
    # __setattr__ writes to the instance itself rather than forwarding to a
    # per-device cache.
    __initialized = False

    def __init__(self):
        self.caches = []
        self.__initialized = True

    def __getitem__(self, device):
        # Normalize int / str / torch.device inputs to an integer index.
        index = torch.cuda._utils._get_device_index(device)
        if index < 0 or index >= torch.cuda.device_count():
            raise RuntimeError(
                f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got "
                f"device with index {index}"
            )
        # Lazily populate one cuFFTPlanCache per device on first access.
        if len(self.caches) == 0:
            self.caches.extend(
                cuFFTPlanCache(index) for index in range(torch.cuda.device_count())
            )
        return self.caches[index]

    def __getattr__(self, name):
        # Unknown attribute reads delegate to the current device's cache.
        return getattr(self[torch.cuda.current_device()], name)

    def __setattr__(self, name, value):
        # After initialization, attribute writes are forwarded to the current
        # device's cache; before that, they set normal instance state.
        if self.__initialized:
            return setattr(self[torch.cuda.current_device()], name, value)
        else:
            return super().__setattr__(name, value)
121
+
122
+
123
class cuBLASModule:
    """Attribute-style proxy for the native cuBLAS precision flags.

    Reading or writing ``allow_tf32``,
    ``allow_fp16_reduced_precision_reduction`` or
    ``allow_bf16_reduced_precision_reduction`` forwards to the matching
    torch._C getter/setter; any other attribute raises ``AttributeError``.
    """

    def __getattr__(self, name):
        getters = {
            "allow_tf32": torch._C._get_cublas_allow_tf32,
            "allow_fp16_reduced_precision_reduction": torch._C._get_cublas_allow_fp16_reduced_precision_reduction,
            "allow_bf16_reduced_precision_reduction": torch._C._get_cublas_allow_bf16_reduced_precision_reduction,
        }
        if name not in getters:
            raise AttributeError("Unknown attribute " + name)
        return getters[name]()

    def __setattr__(self, name, value):
        setters = {
            "allow_tf32": torch._C._set_cublas_allow_tf32,
            "allow_fp16_reduced_precision_reduction": torch._C._set_cublas_allow_fp16_reduced_precision_reduction,
            "allow_bf16_reduced_precision_reduction": torch._C._set_cublas_allow_bf16_reduced_precision_reduction,
        }
        if name not in setters:
            raise AttributeError("Unknown attribute " + name)
        return setters[name](value)
141
+
142
+
143
# Mapping from user-facing backend names to the native enum values accepted
# by torch._C._set_linalg_preferred_backend.
_LinalgBackends = {
    "default": torch._C._LinalgBackend.Default,
    "cusolver": torch._C._LinalgBackend.Cusolver,
    "magma": torch._C._LinalgBackend.Magma,
}
# Pre-rendered option list used in error messages.
_LinalgBackends_str = ", ".join(_LinalgBackends.keys())
149
+
150
+
151
def preferred_linalg_library(
    backend: Union[None, str, torch._C._LinalgBackend] = None
) -> torch._C._LinalgBackend:
    r"""
    Override the heuristic PyTorch uses to choose between cuSOLVER and MAGMA for CUDA linear algebra operations.

    .. warning:: This flag is experimental and subject to change.

    When both cuSOLVER and MAGMA are available, PyTorch normally picks one
    per operation with a heuristic.  This function overrides that choice:

    * ``"cusolver"`` — prefer cuSOLVER wherever possible.
    * ``"magma"`` — prefer MAGMA wherever possible.
    * ``"default"`` — fall back to the built-in heuristics.
    * Called with no argument, it simply returns the current preference.
    * The environment variable ``TORCH_LINALG_PREFER_CUSOLVER=1`` sets the
      initial preference to cuSOLVER; a later call here still overrides it.

    Note: even with a preference set, the other library is used for
    operations the preferred one does not implement.  Setting this can help
    when the heuristic picks poorly for your inputs.

    Currently supported linalg operators:

    * :func:`torch.linalg.inv`
    * :func:`torch.linalg.inv_ex`
    * :func:`torch.linalg.cholesky`
    * :func:`torch.linalg.cholesky_ex`
    * :func:`torch.cholesky_solve`
    * :func:`torch.cholesky_inverse`
    * :func:`torch.linalg.lu_factor`
    * :func:`torch.linalg.lu`
    * :func:`torch.linalg.lu_solve`
    * :func:`torch.linalg.qr`
    * :func:`torch.linalg.eigh`
    * :func:`torch.linalg.eighvals`
    * :func:`torch.linalg.svd`
    * :func:`torch.linalg.svdvals`
    """
    if backend is not None:
        if isinstance(backend, str):
            if backend not in _LinalgBackends:
                raise RuntimeError(
                    f"Unknown input value. Choose from: {_LinalgBackends_str}."
                )
            backend = _LinalgBackends[backend]
        if not isinstance(backend, torch._C._LinalgBackend):
            raise RuntimeError("Unknown input value type.")
        torch._C._set_linalg_preferred_backend(backend)

    # Always report the (possibly just-updated) current preference.
    return torch._C._get_linalg_preferred_backend()
209
+
210
+
211
# Mapping from user-facing backend names to the native enum values accepted
# by torch._C._set_blas_preferred_backend.  "hipblaslt" is a ROCm-friendly
# alias for the same Cublaslt enum value.
_BlasBackends = {
    "cublas": torch._C._BlasBackend.Cublas,
    "cublaslt": torch._C._BlasBackend.Cublaslt,
    "hipblaslt": torch._C._BlasBackend.Cublaslt,  # alias
}
# Pre-rendered option list used in error messages.
_BlasBackends_str = ", ".join(_BlasBackends.keys())
217
+
218
+
219
def preferred_blas_library(
    backend: Union[None, str, torch._C._BlasBackend] = None
) -> torch._C._BlasBackend:
    r"""
    Override the library PyTorch uses for BLAS operations. Choose between cuBLAS and cuBLASLt.

    .. warning:: This flag is experimental and subject to change.

    CUDA BLAS operations default to cuBLAS even when cuBLASLt is also
    available; on ROCm builds, hipBLAS and hipBLASLt may perform
    differently.  This function overrides the choice:

    * ``"cublas"`` — prefer cuBLAS wherever possible.
    * ``"cublaslt"`` — prefer cuBLASLt wherever possible.
    * Called with no argument, it simply returns the current preference.
    * The environment variable ``TORCH_BLAS_PREFER_CUBLASLT=1`` sets the
      initial preference to cuBLASLt; a later call here still overrides it.

    Note: even with a preference set, the other library is used for
    operations the preferred one does not implement.  Setting this can help
    when the default selection performs poorly for your inputs.
    """
    if backend is not None:
        if isinstance(backend, str):
            if backend not in _BlasBackends:
                raise RuntimeError(
                    f"Unknown input value. Choose from: {_BlasBackends_str}."
                )
            backend = _BlasBackends[backend]
        if not isinstance(backend, torch._C._BlasBackend):
            raise RuntimeError("Unknown input value type.")
        torch._C._set_blas_preferred_backend(backend)

    # Always report the (possibly just-updated) current preference.
    return torch._C._get_blas_preferred_backend()
259
+
260
+
261
# Re-export the native SDPA parameter/backend types under public names.
from torch._C import _SDPAParams as SDPAParams, _SDPBackend as SDPBackend

# Set the __module__ attribute so the class presents as part of this module
# (affects repr, pickling, and documentation tooling).
SDPAParams.__module__ = "torch.backends.cuda"
SDPAParams.__name__ = "SDPAParams"
266
+
267
+
268
def flash_sdp_enabled():
    r"""
    .. warning:: This flag is beta and subject to change.

    Report whether the FlashAttention backend of scaled dot product
    attention is currently enabled.
    """
    # Delegate to the native flag.
    return torch._C._get_flash_sdp_enabled()
275
+
276
+
277
def enable_flash_sdp(enabled: bool):
    r"""
    .. warning:: This flag is beta and subject to change.

    Enable or disable the flash scaled dot product attention backend.
    """
    # Delegates straight to the process-wide C++ flag.
    torch._C._set_sdp_use_flash(enabled)
284
+
285
+
286
def mem_efficient_sdp_enabled():
    r"""
    .. warning:: This flag is beta and subject to change.

    Report whether the memory-efficient scaled dot product attention backend is enabled.
    """
    state = torch._C._get_mem_efficient_sdp_enabled()
    return state
293
+
294
+
295
def enable_mem_efficient_sdp(enabled: bool):
    r"""
    .. warning:: This flag is beta and subject to change.

    Enable or disable the memory-efficient scaled dot product attention backend.
    """
    torch._C._set_sdp_use_mem_efficient(enabled)
302
+
303
+
304
def math_sdp_enabled():
    r"""
    .. warning:: This flag is beta and subject to change.

    Report whether the math (reference) scaled dot product attention backend is enabled.
    """
    state = torch._C._get_math_sdp_enabled()
    return state
311
+
312
+
313
def enable_math_sdp(enabled: bool):
    r"""
    .. warning:: This flag is beta and subject to change.

    Enable or disable the math (reference) scaled dot product attention backend.
    """
    torch._C._set_sdp_use_math(enabled)
320
+
321
+
322
def can_use_flash_attention(params: SDPAParams, debug: bool = False) -> bool:
    r"""Check if FlashAttention can be utilized in scaled_dot_product_attention.

    Args:
        params: An instance of SDPAParams carrying the query/key/value tensors,
            an optional attention mask, the dropout rate, and the is-causal flag.
        debug: Whether to logging.warn debug information as to why FlashAttention could not be run.
            Defaults to False.

    Returns:
        True if FlashAttention can be used with the given parameters; otherwise, False.

    Note:
        This function is dependent on a CUDA-enabled build of PyTorch. It will return False
        in non-CUDA environments.
    """
    # All eligibility logic lives in the C++ dispatcher.
    verdict = torch._C._can_use_flash_attention(params, debug)
    return verdict
340
+
341
+
342
def can_use_efficient_attention(params: SDPAParams, debug: bool = False) -> bool:
    r"""Check if efficient_attention can be utilized in scaled_dot_product_attention.

    Args:
        params: An instance of SDPAParams carrying the query/key/value tensors,
            an optional attention mask, the dropout rate, and the is-causal flag.
        debug: Whether to logging.warn with information as to why efficient_attention could not be run.
            Defaults to False.

    Returns:
        True if efficient_attention can be used with the given parameters; otherwise, False.

    Note:
        This function is dependent on a CUDA-enabled build of PyTorch. It will return False
        in non-CUDA environments.
    """
    # All eligibility logic lives in the C++ dispatcher.
    verdict = torch._C._can_use_mem_efficient_attention(params, debug)
    return verdict
360
+
361
+
362
+ def cudnn_sdp_enabled():
363
+ r"""
364
+ .. warning:: This flag is beta and subject to change.
365
+
366
+ Returns whether cuDNN scaled dot product attention is enabled or not.
367
+ """
368
+ return torch._C._get_cudnn_sdp_enabled()
369
+
370
+
371
+ def enable_cudnn_sdp(enabled: bool):
372
+ r"""
373
+ .. warning:: This flag is beta and subject to change.
374
+
375
+ Enables or disables cuDNN scaled dot product attention.
376
+ """
377
+ torch._C._set_sdp_use_cudnn(enabled)
378
+
379
+
380
@contextlib.contextmanager
@deprecated(
    (
        "`torch.backends.cuda.sdp_kernel()` is deprecated. "
        "In the future, this context manager will be removed. "
        "Please see `torch.nn.attention.sdpa_kernel()` for the new context manager, "
        "with updated signature."
    ),
    category=FutureWarning,
)
def sdp_kernel(
    enable_flash: bool = True,
    enable_math: bool = True,
    enable_mem_efficient: bool = True,
    enable_cudnn: bool = True,
):
    r"""
    .. warning:: This flag is beta and subject to change.

    Temporarily enable or disable the scaled dot product attention backends.
    Upon exiting the context manager, the previous state of the flags is restored.
    """
    from torch.nn.attention import sdpa_kernel

    # Preserve the historical priority order: flash, mem-efficient, math, cudnn.
    selection = (
        (enable_flash, SDPBackend.FLASH_ATTENTION),
        (enable_mem_efficient, SDPBackend.EFFICIENT_ATTENTION),
        (enable_math, SDPBackend.MATH),
        (enable_cudnn, SDPBackend.CUDNN_ATTENTION),
    )
    backend_list = [backend for wanted, backend in selection if wanted]

    with sdpa_kernel(backend_list) as context:
        try:
            yield context
        finally:
            pass
419
+
420
+
421
# Public singleton handles exposed as ``torch.backends.cuda.cufft_plan_cache``
# and ``torch.backends.cuda.matmul`` (classes defined earlier in this module).
cufft_plan_cache = cuFFTPlanCacheManager()
matmul = cuBLASModule()
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cuda/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__init__.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import os
3
+ import sys
4
+ import warnings
5
+ from contextlib import contextmanager
6
+ from typing import Optional
7
+
8
+ import torch
9
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
10
+
11
+ try:
12
+ from torch._C import _cudnn
13
+ except ImportError:
14
+ _cudnn = None # type: ignore[assignment]
15
+
16
+ # Write:
17
+ #
18
+ # torch.backends.cudnn.enabled = False
19
+ #
20
+ # to globally disable CuDNN/MIOpen
21
+
22
# Lazily-computed cuDNN version; populated by _init() on first successful call.
__cudnn_version: Optional[int] = None

if _cudnn is not None:

    def _init():
        """Check runtime-vs-compile cuDNN compatibility once; return True when usable.

        Caches the version in the module-global ``__cudnn_version`` so the
        compatibility check only runs on the first call.  Raises RuntimeError
        with a targeted hint when the runtime library is incompatible.
        """
        global __cudnn_version
        if __cudnn_version is None:
            __cudnn_version = _cudnn.getVersionInt()
            runtime_version = _cudnn.getRuntimeVersion()
            compile_version = _cudnn.getCompileVersion()
            runtime_major, runtime_minor, _ = runtime_version
            compile_major, compile_minor, _ = compile_version
            # Different major versions are always incompatible
            # Starting with cuDNN 7, minor versions are backwards-compatible
            # Not sure about MIOpen (ROCm), so always do a strict check
            if runtime_major != compile_major:
                cudnn_compatible = False
            elif runtime_major < 7 or not _cudnn.is_cuda:
                cudnn_compatible = runtime_minor == compile_minor
            else:
                cudnn_compatible = runtime_minor >= compile_minor
            if not cudnn_compatible:
                # Escape hatch: users may skip the check entirely via env var.
                if os.environ.get("PYTORCH_SKIP_CUDNN_COMPATIBILITY_CHECK", "0") == "1":
                    return True
                base_error_msg = (
                    f"cuDNN version incompatibility: "
                    f"PyTorch was compiled against {compile_version} "
                    f"but found runtime version {runtime_version}. "
                    f"PyTorch already comes bundled with cuDNN. "
                    f"One option to resolving this error is to ensure PyTorch "
                    f"can find the bundled cuDNN. "
                )

                # Tailor the hint: a cuda/cudnn entry in LD_LIBRARY_PATH is the
                # most common source of a mismatched library being picked up.
                if "LD_LIBRARY_PATH" in os.environ:
                    ld_library_path = os.environ.get("LD_LIBRARY_PATH", "")
                    if any(
                        substring in ld_library_path for substring in ["cuda", "cudnn"]
                    ):
                        raise RuntimeError(
                            f"{base_error_msg}"
                            f"Looks like your LD_LIBRARY_PATH contains incompatible version of cudnn. "
                            f"Please either remove it from the path or install cudnn {compile_version}"
                        )
                    else:
                        raise RuntimeError(
                            f"{base_error_msg}"
                            f"one possibility is that there is a "
                            f"conflicting cuDNN in LD_LIBRARY_PATH."
                        )
                else:
                    raise RuntimeError(base_error_msg)

        return True

else:

    # No cuDNN/MIOpen bindings in this build: _init() simply reports failure.
    def _init():
        return False
80
+
81
+
82
def version():
    """Return the runtime cuDNN version as an int, or None when cuDNN is unusable."""
    return __cudnn_version if _init() else None
87
+
88
+
89
# Tensor dtypes accepted by the cuDNN/MIOpen path; consulted by is_acceptable().
CUDNN_TENSOR_DTYPES = {
    torch.half,
    torch.float,
    torch.double,
}
94
+
95
+
96
+ def is_available():
97
+ r"""Return a bool indicating if CUDNN is currently available."""
98
+ return torch._C._has_cudnn
99
+
100
+
101
def is_acceptable(tensor):
    """Return True when cuDNN/MIOpen can run operations on *tensor*.

    Requires the backend to be enabled, the tensor to live on a CUDA device
    with a supported dtype, and the library to be present and initializable.
    """
    if not torch._C._get_cudnn_enabled():
        return False
    on_cuda = tensor.device.type == "cuda"
    if not (on_cuda and tensor.dtype in CUDNN_TENSOR_DTYPES):
        return False
    if not is_available():
        warnings.warn(
            "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild "
            "PyTorch making sure the library is visible to the build system."
        )
        return False
    if not _init():
        libpath = {"darwin": "DYLD_LIBRARY_PATH", "win32": "PATH"}.get(
            sys.platform, "LD_LIBRARY_PATH"
        )
        warnings.warn(
            "cuDNN/MIOpen library not found. Check your {libpath}".format(
                libpath=libpath
            )
        )
        return False
    return True
122
+
123
+
124
def set_flags(
    _enabled=None,
    _benchmark=None,
    _benchmark_limit=None,
    _deterministic=None,
    _allow_tf32=None,
):
    """Read the current global cuDNN flags, then apply the non-None arguments.

    Returns the previous values as a 5-tuple
    (enabled, benchmark, benchmark_limit, deterministic, allow_tf32) so that
    callers (see ``flags`` below) can restore them later.
    """
    orig_flags = (
        torch._C._get_cudnn_enabled(),
        torch._C._get_cudnn_benchmark(),
        # benchmark_limit is only queryable when cuDNN is compiled in.
        None if not is_available() else torch._C._cuda_get_cudnn_benchmark_limit(),
        torch._C._get_cudnn_deterministic(),
        torch._C._get_cudnn_allow_tf32(),
    )
    if _enabled is not None:
        torch._C._set_cudnn_enabled(_enabled)
    if _benchmark is not None:
        torch._C._set_cudnn_benchmark(_benchmark)
    if _benchmark_limit is not None and is_available():
        torch._C._cuda_set_cudnn_benchmark_limit(_benchmark_limit)
    if _deterministic is not None:
        torch._C._set_cudnn_deterministic(_deterministic)
    if _allow_tf32 is not None:
        torch._C._set_cudnn_allow_tf32(_allow_tf32)
    return orig_flags
149
+
150
+
151
@contextmanager
def flags(
    enabled=False,
    benchmark=False,
    benchmark_limit=10,
    deterministic=False,
    allow_tf32=True,
):
    """Context manager that temporarily applies the given cuDNN flags.

    The previous flag values are captured on entry and restored on exit,
    even when the body raises.
    """
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(
            enabled, benchmark, benchmark_limit, deterministic, allow_tf32
        )
    try:
        yield
    finally:
        # recover the previous values
        with __allow_nonbracketed_mutation():
            set_flags(*orig_flags)
169
+
170
+
171
+ # The magic here is to allow us to intercept code like this:
172
+ #
173
+ # torch.backends.<cudnn|mkldnn>.enabled = True
174
+
175
+
176
class CudnnModule(PropModule):
    """Module proxy installed into sys.modules so attribute assignments like
    ``torch.backends.cudnn.enabled = False`` are routed through ContextProp."""

    def __init__(self, m, name):
        super().__init__(m, name)

    enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled)
    deterministic = ContextProp(
        torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic
    )
    benchmark = ContextProp(
        torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark
    )
    # benchmark_limit is only exposed when cuDNN is compiled in.
    benchmark_limit = None
    if is_available():
        benchmark_limit = ContextProp(
            torch._C._cuda_get_cudnn_benchmark_limit,
            torch._C._cuda_set_cudnn_benchmark_limit,
        )
    allow_tf32 = ContextProp(
        torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32
    )
196
+
197
+
198
# Replace this module object in sys.modules with a CudnnModule proxy so that
# attribute assignment is intercepted; this is the sys.modules replacement
# trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__)

# Static type annotations for the attributes served dynamically by the proxy.
enabled: bool
deterministic: bool
benchmark: bool
allow_tf32: bool
benchmark_limit: int
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.78 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/cudnn/rnn.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch.cuda
3
+
4
+ try:
5
+ from torch._C import _cudnn
6
+ except ImportError:
7
+ # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(),
8
+ # so it's safe to not emit any checks here.
9
+ _cudnn = None # type: ignore[assignment]
10
+
11
+
12
def get_cudnn_mode(mode):
    """Translate an RNN mode string to the cuDNN enum value as an int."""
    # Attribute names are looked up lazily so this fails only for valid modes
    # when the _cudnn bindings are absent, matching the original if/elif chain.
    attr_name = {
        "RNN_RELU": "rnn_relu",
        "RNN_TANH": "rnn_tanh",
        "LSTM": "lstm",
        "GRU": "gru",
    }.get(mode)
    if attr_name is None:
        raise Exception(f"Unknown mode: {mode}")  # noqa: TRY002
    return int(getattr(_cudnn.RNNMode, attr_name))
23
+
24
+
25
# NB: We don't actually need this class anymore (in fact, we could serialize the
# dropout state for even better reproducibility), but it is kept for backwards
# compatibility for old models.
class Unserializable:
    """Wrapper that deliberately drops its payload when pickled.

    Used below for the cuDNN dropout-state tensor, which is not serialized
    alongside a model; after a pickle round-trip ``get()`` returns None.
    """

    def __init__(self, inner):
        self.inner = inner

    def get(self):
        # The wrapped value, or None after unpickling (see __setstate__).
        return self.inner

    def __getstate__(self):
        # Note: can't return {}, because python2 won't call __setstate__
        # if the value evaluates to False
        return "<unserializable>"

    def __setstate__(self, state):
        # The payload is gone after unpickling; callers must re-create it.
        self.inner = None
42
+
43
+
44
def init_dropout_state(dropout, train, dropout_seed, dropout_state):
    """Fetch, or lazily create, the cuDNN dropout-state tensor for the current device.

    Args:
        dropout: dropout probability requested by the caller.
        train: when False, dropout is forced to 0 (no dropout state needed).
        dropout_seed: seed forwarded to the cuDNN dropout-state initialization.
        dropout_state: dict cache keyed per CUDA device; mutated in place.
    """
    # One cached entry per CUDA device.
    dropout_desc_name = "desc_" + str(torch.cuda.current_device())
    dropout_p = dropout if train else 0
    # (Re)create when the entry is missing, or when a pickle round-trip wiped
    # the payload (Unserializable.get() returns None after unpickling).
    if (dropout_desc_name not in dropout_state) or (
        dropout_state[dropout_desc_name].get() is None
    ):
        if dropout_p == 0:
            dropout_state[dropout_desc_name] = Unserializable(None)
        else:
            dropout_state[dropout_desc_name] = Unserializable(
                torch._cudnn_init_dropout_state(  # type: ignore[call-arg]
                    dropout_p,
                    train,
                    dropout_seed,
                    self_ty=torch.uint8,
                    device=torch.device("cuda"),
                )
            )
    dropout_ts = dropout_state[dropout_desc_name].get()
    return dropout_ts
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mha/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (889 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkl/__init__.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
def is_available():
    r"""Return whether PyTorch is built with MKL support."""
    # Build-time flag exported by the C extension.
    return bool(torch._C.has_mkl)
8
+
9
+
10
# Verbosity levels accepted by the `verbose` context manager below.
VERBOSE_OFF = 0
VERBOSE_ON = 1
12
+
13
+
14
class verbose:
    """
    On-demand oneMKL verbosing functionality.

    oneMKL can dump verbose messages (e.g. kernel execution duration) to help
    debug performance issues.  The ``MKL_VERBOSE`` environment variable enables
    this globally, which produces output for every step; usually one iteration
    of interest is enough.  This context manager scopes the verbose dumping to
    just its body.  In the following example, verbose messages are dumped for
    the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
    """

    def __init__(self, enable):
        self.enable = enable

    def __enter__(self):
        # VERBOSE_OFF means "do nothing"; otherwise switch oneMKL verbosity on.
        if self.enable != VERBOSE_OFF:
            st = torch._C._verbose.mkl_set_verbose(self.enable)
            assert (
                st
            ), "Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
            return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always switch verbosity back off; never suppress exceptions.
        torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
        return False
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkl/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import sys
3
+ from contextlib import contextmanager
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ import torch
8
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
9
+
10
+
11
+ def is_available():
12
+ r"""Return whether PyTorch is built with MKL-DNN support."""
13
+ return torch._C._has_mkldnn
14
+
15
+
16
# Verbosity levels accepted by the `verbose` context manager below.
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
19
+
20
+
21
class verbose:
    """
    On-demand oneDNN (former MKL-DNN) verbosing functionality.

    oneDNN can dump verbose messages (kernel size, input data size, execution
    duration) to help debug performance issues.  The ``DNNL_VERBOSE``
    environment variable enables this globally, which produces output for
    every step; usually one iteration of interest is enough.  This context
    manager scopes the verbose dumping to just its body.  In the following
    example, verbose messages are dumped for the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
            - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
    """

    def __init__(self, level):
        self.level = level

    def __enter__(self):
        # VERBOSE_OFF means "do nothing"; otherwise switch oneDNN verbosity on.
        if self.level != VERBOSE_OFF:
            st = torch._C._verbose.mkldnn_set_verbose(self.level)
            assert (
                st
            ), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
            return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always switch verbosity back off; never suppress exceptions.
        torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
        return False
66
+
67
+
68
def set_flags(_enabled):
    """Set the global mkldnn-enabled flag; return the prior value as a 1-tuple."""
    previous = (torch._C._get_mkldnn_enabled(),)
    torch._C._set_mkldnn_enabled(_enabled)
    return previous
72
+
73
+
74
@contextmanager
def flags(enabled=False):
    """Temporarily set the mkldnn-enabled flag; the old value is restored on exit."""
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(enabled)
    try:
        yield
    finally:
        # Restore the previous value even when the body raised.
        with __allow_nonbracketed_mutation():
            set_flags(orig_flags[0])
83
+
84
+
85
class MkldnnModule(PropModule):
    """Module proxy that routes ``torch.backends.mkldnn.enabled = ...`` style
    assignments through ContextProp (installed into sys.modules below)."""

    def __init__(self, m, name):
        super().__init__(m, name)

    enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
90
+
91
+
92
if TYPE_CHECKING:
    # Static view of the attribute served dynamically by MkldnnModule.
    enabled: ContextProp


# Cool stuff from torch/backends/cudnn/__init__.py and
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mps/__init__.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from functools import lru_cache as _lru_cache
3
+
4
+ from typing import Optional
5
+
6
+ import torch
7
+ from ...library import Library as _Library
8
+
9
+ __all__ = ["is_built", "is_available", "is_macos13_or_newer", "is_macos_or_newer"]
10
+
11
+
12
+ def is_built() -> bool:
13
+ r"""Return whether PyTorch is built with MPS support.
14
+
15
+ Note that this doesn't necessarily mean MPS is available; just that
16
+ if this PyTorch binary were run a machine with working MPS drivers
17
+ and devices, we would be able to use it.
18
+ """
19
+ return torch._C._has_mps
20
+
21
+
22
@_lru_cache
def is_available() -> bool:
    r"""Return a bool indicating if MPS is currently available."""
    # Result is cached for the lifetime of the process (lru_cache).
    return torch._C._mps_is_available()
26
+
27
+
28
+ @_lru_cache
29
+ def is_macos_or_newer(major: int, minor: int) -> bool:
30
+ r"""Return a bool indicating whether MPS is running on given MacOS or newer."""
31
+ return torch._C._mps_is_on_macos_or_newer(major, minor)
32
+
33
+
34
+ @_lru_cache
35
+ def is_macos13_or_newer(minor: int = 0) -> bool:
36
+ r"""Return a bool indicating whether MPS is running on MacOS 13 or newer."""
37
+ return torch._C._mps_is_on_macos_or_newer(13, minor)
38
+
39
+
40
# Library handle for the MPS aten-op registrations; created once by _init().
_lib: Optional[_Library] = None


def _init():
    r"""Register prims as implementation of var_mean and group_norm."""
    global _lib
    # Idempotent: skip when MPS is not built in, or registration already ran.
    if is_built() is False or _lib is not None:
        return
    # Imports are deferred so this module stays cheap to import.
    from ..._decomp.decompositions import (
        native_group_norm_backward as _native_group_norm_backward,
    )
    from ..._refs import native_group_norm as _native_group_norm

    _lib = _Library("aten", "IMPL")
    _lib.impl("native_group_norm", _native_group_norm, "MPS")
    _lib.impl("native_group_norm_backward", _native_group_norm_backward, "MPS")
evalkit_tf446/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.97 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/nnpack/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from contextlib import contextmanager
3
+
4
+ import torch
5
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
6
+
7
+ __all__ = ["is_available", "flags", "set_flags"]
8
+
9
+
10
def is_available():
    r"""Return whether PyTorch is built with NNPACK support."""
    return bool(torch._nnpack_available())
13
+
14
+
15
+ def set_flags(_enabled):
16
+ r"""Set if nnpack is enabled globally"""
17
+ orig_flags = (torch._C._get_nnpack_enabled(),)
18
+ torch._C._set_nnpack_enabled(_enabled)
19
+ return orig_flags
20
+
21
+
22
@contextmanager
def flags(enabled=False):
    r"""Context manager for setting if nnpack is enabled globally"""
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(enabled)
    try:
        yield
    finally:
        # Restore the previous value even when the body raised.
        with __allow_nonbracketed_mutation():
            set_flags(orig_flags[0])
evalkit_tf446/lib/python3.10/site-packages/torch/backends/nnpack/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.19 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/openmp/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
def is_available():
    r"""Return whether PyTorch is built with OpenMP support."""
    # Build-time flag exported by the C extension.
    return bool(torch._C.has_openmp)
evalkit_tf446/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (379 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import sys
3
+ import warnings
4
+ from contextlib import contextmanager
5
+ from functools import lru_cache as _lru_cache
6
+ from typing import Any
7
+
8
+ from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
9
+
10
+ try:
11
+ import opt_einsum as _opt_einsum # type: ignore[import]
12
+ except ImportError:
13
+ _opt_einsum = None
14
+
15
+
16
@_lru_cache
def is_available() -> bool:
    r"""Return a bool indicating if opt_einsum is currently available."""
    # True iff the optional opt_einsum import at module top succeeded.
    return _opt_einsum is not None
20
+
21
+
22
def get_opt_einsum() -> Any:
    r"""Return the opt_einsum package when it is available, else None."""
    return _opt_einsum
25
+
26
+
27
def _set_enabled(_enabled: bool) -> None:
    """Module-level setter backing ``torch.backends.opt_einsum.enabled``."""
    # Enabling without the package installed would silently do nothing — fail loudly.
    if _enabled and not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `enabled` to {_enabled} will not reap "
            "the benefits of calculating an optimal path for einsum. torch.einsum will "
            "fall back to contracting from left to right. To enable this optimal path "
            "calculation, please install opt-einsum."
        )
    global enabled
    enabled = _enabled
37
+
38
+
39
def _get_enabled() -> bool:
    """Module-level getter backing ``torch.backends.opt_einsum.enabled``."""
    return enabled
41
+
42
+
43
def _set_strategy(_strategy: str) -> None:
    """Module-level setter backing ``torch.backends.opt_einsum.strategy``.

    Validation happens in order: package present, feature enabled, value valid.
    """
    if not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please install opt_einsum or unset `strategy`."
        )
    if not enabled:
        raise ValueError(
            f"opt_einsum is not enabled, so setting a `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please set `enabled` to `True` as well or unset `strategy`."
        )
    if _strategy not in ("auto", "greedy", "optimal"):
        raise ValueError(
            f"`strategy` must be one of the following: [auto, greedy, optimal] but is {_strategy}"
        )
    global strategy
    strategy = _strategy
62
+
63
+
64
def _get_strategy() -> str:
    """Module-level getter backing ``torch.backends.opt_einsum.strategy``."""
    return strategy
66
+
67
+
68
def set_flags(_enabled=None, _strategy=None):
    """Apply any non-None flag values; return the previous (enabled, strategy) pair."""
    # strategy is only meaningful when the opt_einsum package is importable.
    orig_flags = (enabled, strategy if is_available() else None)
    if _enabled is not None:
        _set_enabled(_enabled)
    if _strategy is not None:
        _set_strategy(_strategy)
    return orig_flags
75
+
76
+
77
@contextmanager
def flags(enabled=None, strategy=None):
    """Temporarily apply the given opt_einsum flags, restoring the old values on exit."""
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(enabled, strategy)
    try:
        yield
    finally:
        # recover the previous values
        with __allow_nonbracketed_mutation():
            set_flags(*orig_flags)
87
+
88
+
89
+ # The magic here is to allow us to intercept code like this:
90
+ #
91
+ # torch.backends.opt_einsum.enabled = True
92
+
93
+
94
class OptEinsumModule(PropModule):
    """Module proxy enabling ``torch.backends.opt_einsum.enabled = ...`` style
    assignments (installed into sys.modules below)."""

    def __init__(self, m, name):
        super().__init__(m, name)

    global enabled
    enabled = ContextProp(_get_enabled, _set_enabled)
    global strategy
    strategy = None
    # strategy is only exposed when the opt_einsum package is importable.
    if is_available():
        strategy = ContextProp(_get_strategy, _set_strategy)
104
+
105
+
106
# This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = OptEinsumModule(sys.modules[__name__], __name__)

# Module defaults: path optimization is on whenever the package is importable.
enabled = True if is_available() else False
strategy = "auto" if is_available() else None
evalkit_tf446/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.46 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.82 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__init__.py ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes). View file
 
evalkit_tf446/lib/python3.10/site-packages/torch/backends/xeon/__pycache__/run_cpu.cpython-310.pyc ADDED
Binary file (25.9 kB). View file